review.fuel-infra Code Review - packages/trusty/i40e-dkms.git/commitdiff
Update i40e-dkms kernel module to 1.5.18 version for Ubuntu 60/20660/5 master
authorAlbert <asyriy@mirantis.com>
Wed, 11 May 2016 18:27:55 +0000 (21:27 +0300)
committerAlbert <asyriy@mirantis.com>
Fri, 13 May 2016 17:11:34 +0000 (20:11 +0300)
Change-Id: Ib393e4a63426ed28cf26c0f394d2e079d9a1ebbf
Closes-bug: #1581096

56 files changed:
debian/changelog
debian/prerm
debian/rules
i40e-dkms/i40e-1.3.47/SUMS [deleted file]
i40e-dkms/i40e-1.3.47/pci.updates [deleted file]
i40e-dkms/i40e-1.3.47/src/Makefile [deleted file]
i40e-dkms/i40e-1.3.47/src/i40e/Kbuild [deleted file]
i40e-dkms/i40e-1.3.47/src/i40e/i40e_configfs.c [deleted file]
i40e-dkms/i40e-1.5.18/COPYING [moved from i40e-dkms/i40e-1.3.47/COPYING with 100% similarity]
i40e-dkms/i40e-1.5.18/README [moved from i40e-dkms/i40e-1.3.47/README with 66% similarity]
i40e-dkms/i40e-1.5.18/SUMS [new file with mode: 0644]
i40e-dkms/i40e-1.5.18/dkms.conf [moved from i40e-dkms/i40e-1.3.47/dkms.conf with 58% similarity]
i40e-dkms/i40e-1.5.18/i40e.7 [moved from i40e-dkms/i40e-1.3.47/i40e.7 with 92% similarity]
i40e-dkms/i40e-1.5.18/i40e.spec [moved from i40e-dkms/i40e-1.3.47/i40e.spec with 97% similarity]
i40e-dkms/i40e-1.5.18/pci.updates [new file with mode: 0644]
i40e-dkms/i40e-1.5.18/scripts/dump_tables [moved from i40e-dkms/i40e-1.3.47/scripts/dump_tables with 97% similarity]
i40e-dkms/i40e-1.5.18/scripts/set_irq_affinity [moved from i40e-dkms/i40e-1.3.47/scripts/set_irq_affinity with 89% similarity]
i40e-dkms/i40e-1.5.18/src/Makefile [new file with mode: 0644]
i40e-dkms/i40e-1.5.18/src/Module.supported [moved from i40e-dkms/i40e-1.3.47/src/i40e/Module.supported with 100% similarity]
i40e-dkms/i40e-1.5.18/src/common.mk [new file with mode: 0644]
i40e-dkms/i40e-1.5.18/src/i40e.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e.h with 75% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_adminq.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_adminq.c with 95% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_adminq.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_adminq.h with 93% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_adminq_cmd.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_adminq_cmd.h with 91% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_alloc.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_alloc.h with 88% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_common.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_common.c with 82% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_dcb.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_dcb.c with 97% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_dcb.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_dcb.h with 96% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_dcb_nl.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_dcb_nl.c with 97% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_debugfs.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_debugfs.c with 87% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_devids.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_devids.h with 77% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_diag.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_diag.c with 95% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_diag.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_diag.h with 87% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_ethtool.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_ethtool.c with 65% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_fcoe.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_fcoe.c with 97% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_fcoe.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_fcoe.h with 94% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_helper.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_helper.h with 88% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_hmc.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_hmc.c with 97% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_hmc.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_hmc.h with 97% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_lan_hmc.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_lan_hmc.c with 99% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_lan_hmc.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_lan_hmc.h with 95% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_main.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_main.c with 89% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_nvm.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_nvm.c with 87% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_osdep.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_osdep.h with 82% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_prototype.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_prototype.h with 85% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_ptp.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_ptp.c with 98% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_register.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_register.h with 60% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_status.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_status.h with 92% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_txrx.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_txrx.c with 61% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_txrx.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_txrx.h with 62% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_type.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_type.h with 92% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_virtchnl.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_virtchnl.h with 88% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_virtchnl_pf.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.c with 79% similarity]
i40e-dkms/i40e-1.5.18/src/i40e_virtchnl_pf.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.h with 89% similarity]
i40e-dkms/i40e-1.5.18/src/kcompat.c [moved from i40e-dkms/i40e-1.3.47/src/i40e/kcompat.c with 94% similarity]
i40e-dkms/i40e-1.5.18/src/kcompat.h [moved from i40e-dkms/i40e-1.3.47/src/i40e/kcompat.h with 94% similarity]

index 257663cfdf7b7e2c0b40de835999f9373c371c60..4fab51835e7a2cf59db6bf93a880a1723c14b85a 100755 (executable)
@@ -1,3 +1,15 @@
+i40e-dkms (1.5.18-1~u14.04+mos1) MOS 10.0; urgency=low
+
+  * Update the driver i40e version up to 1.5.18
+  * There are two changes in dkms.conf file:
+  * 1) the KSRC variable is now used for pointing to the correct kernel path
+  * 2) make was wrapped in quotes ('') to suppress the KERNELRELEASE variable
+  * being passed by DKMS; otherwise the build fails.
+  * Like: MAKE[0]="'make' -C src/ KSRC=/lib/modules/{kernelver}/build"
+
+ -- Mirantis Openstack Linux Team <mos-linux@mirantis.com>  Wed, 11 May 2016 21:35:42 +0200
+ -- The sources were taken from http://sourceforge.net/projects/e1000/files/i40e%20stable/1.5.18/
+
 i40e-dkms (1.3.47-1~u14.04+mos2) MOS 9.0; urgency=low
 
   * Cleanup, removing unnecessary version from the i40e-dkms folder name
 i40e-dkms (1.3.47-1~u14.04+mos2) MOS 9.0; urgency=low
 
   * Cleanup, removing unnecessary version from the i40e-dkms folder name
index 7254308d84c95bd055af662f5a960543450f0aa6..d4b9bc1a92687ad2e6106e504fc9ff12a87c1cd0 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 NAME=i40e
 #!/bin/sh
 
 NAME=i40e
-VERSION=1.3.47
+VERSION=1.5.18
 
 set -e
 
 
 set -e
 
index d2a9f0fd78e9b6300c4d7e7d3406a844a305bc56..b05402366b58172ada302f8a59aa0debd0a9ed34 100755 (executable)
@@ -6,7 +6,7 @@
 
 DEB_NAME=i40e
 NAME=i40e
 
 DEB_NAME=i40e
 NAME=i40e
-VERSION=1.3.47
+VERSION=1.5.18
 
 configure: configure-stamp
 configure-stamp:
 
 configure: configure-stamp
 configure-stamp:
diff --git a/i40e-dkms/i40e-1.3.47/SUMS b/i40e-dkms/i40e-1.3.47/SUMS
deleted file mode 100644 (file)
index 1b96aea..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-46789     4 i40e-1.3.47/pci.updates
-63894     4 i40e-1.3.47/src/i40e/i40e_helper.h
-44588     1 i40e-1.3.47/src/i40e/Module.supported
-13919    68 i40e-1.3.47/src/i40e/i40e_adminq_cmd.h
-07159   138 i40e-1.3.47/src/i40e/kcompat.h
-19688    82 i40e-1.3.47/src/i40e/i40e_debugfs.c
-05671    86 i40e-1.3.47/src/i40e/i40e_txrx.c
-29128     6 i40e-1.3.47/src/i40e/i40e_lan_hmc.h
-33481    42 i40e-1.3.47/src/i40e/i40e_nvm.c
-04585    30 i40e-1.3.47/src/i40e/i40e_adminq.c
-33767     2 i40e-1.3.47/src/i40e/Kbuild
-07400     3 i40e-1.3.47/src/i40e/i40e_alloc.h
-30794    66 i40e-1.3.47/src/i40e/i40e_virtchnl_pf.c
-11873     7 i40e-1.3.47/src/i40e/i40e_dcb.h
-60649     2 i40e-1.3.47/src/i40e/i40e_diag.h
-29182    21 i40e-1.3.47/src/i40e/i40e_prototype.h
-29883    23 i40e-1.3.47/src/i40e/i40e_ptp.c
-36071    37 i40e-1.3.47/src/i40e/i40e_dcb.c
-49741     4 i40e-1.3.47/src/i40e/i40e_osdep.h
-00677     9 i40e-1.3.47/src/i40e/i40e_dcb_nl.c
-37534   320 i40e-1.3.47/src/i40e/i40e_main.c
-17556   221 i40e-1.3.47/src/i40e/i40e_register.h
-57597    52 i40e-1.3.47/src/i40e/kcompat.c
-52476     5 i40e-1.3.47/src/i40e/i40e_adminq.h
-50800     6 i40e-1.3.47/src/i40e/i40e_virtchnl_pf.h
-57192    47 i40e-1.3.47/src/i40e/i40e_fcoe.c
-42138     9 i40e-1.3.47/src/i40e/i40e_hmc.h
-30403    52 i40e-1.3.47/src/i40e/i40e_type.h
-64716   105 i40e-1.3.47/src/i40e/i40e_ethtool.c
-22745     4 i40e-1.3.47/src/i40e/i40e_status.h
-60864    13 i40e-1.3.47/src/i40e/i40e_virtchnl.h
-23673     6 i40e-1.3.47/src/i40e/i40e_diag.c
-30055     2 i40e-1.3.47/src/i40e/i40e_devids.h
-23231    11 i40e-1.3.47/src/i40e/i40e_hmc.c
-45761    11 i40e-1.3.47/src/i40e/i40e_configfs.c
-12717   164 i40e-1.3.47/src/i40e/i40e_common.c
-25879    42 i40e-1.3.47/src/i40e/i40e_lan_hmc.c
-65207     5 i40e-1.3.47/src/i40e/i40e_fcoe.h
-10333    31 i40e-1.3.47/src/i40e/i40e.h
-22557    13 i40e-1.3.47/src/i40e/i40e_txrx.h
-48687    11 i40e-1.3.47/src/Makefile
-09576     6 i40e-1.3.47/scripts/set_irq_affinity
-53852     2 i40e-1.3.47/scripts/dump_tables
-02733    18 i40e-1.3.47/COPYING
-52865    10 i40e-1.3.47/i40e.spec
-18539    35 i40e-1.3.47/README
-08612     3 i40e-1.3.47/i40e.7
diff --git a/i40e-dkms/i40e-1.3.47/pci.updates b/i40e-dkms/i40e-1.3.47/pci.updates
deleted file mode 100644 (file)
index e0ae83f..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-# updates for the system pci.ids file
-#
-# IMPORTANT!  Entries in this list must be sorted as they
-#             would appear in the system pci.ids file.  Entries
-#             are sorted by ven, dev, subven, subdev
-#             (numerical order).
-#
-8086  Intel Corporation
-       154c  XL710/X710 Virtual Function
-       1571  XL710/X710 Virtual Function
-       1572  Ethernet Controller X710 for 10GbE SFP+
-               1028 0000  Ethernet 10G X710 rNDC
-               1028 1f99  Ethernet 10G 4P X710/I350 rNDC
-               1028 1f9c  Ethernet 10G 4P X710 SFP+ rNDC
-               103c 0000  HPE Ethernet 10Gb 562SFP+ Adapter
-               103c 22fc  HPE Ethernet 10Gb 2-port 562FLR-SFP+ Adapter
-               103c 22fd  HPE Ethernet 10Gb 2-port 562SFP+ Adapter
-               1137 0000  Cisco(R) Ethernet Converged NIC X710-4
-               1137 013b  Cisco(R) Ethernet Converged NIC X710-4
-               17aa 0000  Lenovo ThinkServer X710 AnyFabric for 10GbE SFP+
-               17aa 4001  Lenovo ThinkServer X710-4 AnyFabric for 10GbE SFP+
-               17aa 4002  Lenovo ThinkServer X710-2 AnyFabric for 10GbE SFP+
-               8086 0000  Ethernet Converged Network Adapter X710
-               8086 0001  Ethernet Converged Network Adapter X710-4
-               8086 0002  Ethernet Converged Network Adapter X710-4
-               8086 0004  Ethernet Converged Network Adapter X710-4
-               8086 0005  Ethernet Converged Network Adapter X710
-               8086 0006  Ethernet Converged Network Adapter X710
-               8086 0007  Ethernet Converged Network Adapter X710-2
-               8086 0008  Ethernet Converged Network Adapter X710-2
-               8086 0009  Ethernet Controller X710 for 10GbE SFP+ 
-               8086 000a  Ethernet Controller X710 for 10GbE SFP+ 
-               8086 4005  Ethernet Controller X710 for 10GbE SFP+
-               8086 4006  Ethernet Controller X710 for 10GbE SFP+ 
-       1580  Ethernet Controller XL710 for 40GbE backplane
-       1581  Ethernet Controller X710 for 10GbE backplane
-               1028 0000  Ethernet 10G X710-k bNDC
-               1028 1f98  Ethernet 10G 4P X710-k bNDC
-               1028 1f9e  Ethernet 10G 2P X710-k bNDC
-               8086 0000  Ethernet Converged Network Adapter XL710-Q2
-       1583  Ethernet Controller XL710 for 40GbE QSFP+
-               1028 0000  Ethernet 40G 2P XL710 QSFP+ rNDC
-               1028 1f9f  Ethernet 40G 2P XL710 QSFP+ rNDC
-               108e 0000  Oracle Quad 10Gb Ethernet Adapter
-               108e 7b1b  Oracle Quad 10Gb Ethernet Adapter
-               1137 0000  Cisco(R) Ethernet Converged NIC XL710-Q2
-               1137 013c  Cisco(R) Ethernet Converged NIC XL710-Q2
-               8086 0000  Ethernet Converged Network Adapter XL710-Q2
-               8086 0001  Ethernet Converged Network Adapter XL710-Q2
-               8086 0002  Ethernet Converged Network Adapter XL710-Q2
-               8086 0003  Ethernet I/O Module XL710-Q2
-               8086 0004  Ethernet Server Adapter XL710-Q2OCP
-               8086 0006  Ethernet Converged Network Adapter XL710-Q2
-       1584  Ethernet Controller XL710 for 40GbE QSFP+
-               8086 0000  Ethernet Converged Network Adapter XL710-Q1
-               8086 0001  Ethernet Converged Network Adapter XL710-Q1
-               8086 0002  Ethernet Converged Network Adapter XL710-Q1
-               8086 0003  Ethernet I/O Module XL710-Q1
-               8086 0004  Ethernet Server Adapter XL710-Q1OCP
-       1585  Ethernet Controller X710 for 10GbE QSFP+
-       1586  Ethernet Controller X710 for 10GBASE-T
-               108e 0000  Ethernet Controller X710 for 10GBASE-T
-               108e 4857  Ethernet Controller X710 for 10GBASE-T
-       1587  Ethernet Controller XL710 for 20GbE backplane
-               103c 0000  HPE Flex-20 20Gb 2-port 660FLB Adapter
-               103c 22fe  HPE Flex-20 20Gb 2-port 660FLB Adapter
-       1588  Ethernet Controller XL710 for 20GbE backplane
-               103c 0000  HPE Flex-20 20Gb 2-port 660M Adapter
-               103c 22ff  HPE Flex-20 20Gb 2-port 660M Adapter
-       1589  Ethernet Controller X710/X557-AT 10GBASE-T
-               8086 0000  Ethernet Converged Network Adapter X710-T
-               8086 0001  Ethernet Converged Network Adapter X710-T4
-               8086 0002  Ethernet Converged Network Adapter X710-T4
diff --git a/i40e-dkms/i40e-1.3.47/src/Makefile b/i40e-dkms/i40e-1.3.47/src/Makefile
deleted file mode 100644 (file)
index 1f20882..0000000
+++ /dev/null
@@ -1,281 +0,0 @@
-###########################################################################
-#
-# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-###########################################################################
-# Environment tests
-
-ifeq (,$(BUILD_KERNEL))
-BUILD_KERNEL=$(shell uname -r)
-endif
-
-ifeq (,$(wildcard build.mk))
-       DRIVERS :=  $(shell ls -ld i40e* | awk '/^d/ { print $$9 }')
-else
-       DRIVERS :=  $(shell ls -ld i40e i40evf | awk '/^d/ { print $$9 }')
-endif
-DIRS :=  $(patsubst %,%/,$(DRIVERS))
-SOURCES := $(shell find $(DRIVERS) -name "*.[ch]" | grep -v "\.mod\.c")
-MODULES := $(patsubst %,%.ko,$(DRIVERS))
-TARGETS := $(join $(DIRS), $(MODULES))
-MANFILES := $(patsubst %,%.7,$(DRIVERS))
-MANFILES := $(patsubst %,../%,$(MANFILES))
-###########################################################################
-# Environment tests
-
-# Kernel Search Path
-# All the places we look for kernel source
-KSP :=  /lib/modules/$(BUILD_KERNEL)/build \
-        /lib/modules/$(BUILD_KERNEL)/source \
-        /usr/src/linux-$(BUILD_KERNEL) \
-        /usr/src/linux-$($(BUILD_KERNEL) | sed 's/-.*//') \
-        /usr/src/kernel-headers-$(BUILD_KERNEL) \
-        /usr/src/kernel-source-$(BUILD_KERNEL) \
-        /usr/src/linux-$($(BUILD_KERNEL) | sed 's/\([0-9]*\.[0-9]*\)\..*/\1/') \
-        /usr/src/linux
-
-# prune the list down to only values that exist
-# and have an include/config sub-directory
-# as of last check, everything beyond 2.6.32 should have include/config
-# even in the SLES12 /lib/modules/`uname -r`/build
-test_dir = $(shell [ -e $(dir)/include/config ] && echo $(dir))
-KSP := $(foreach dir, $(KSP), $(test_dir))
-
-# we will use this first valid entry in the search path
-ifeq (,$(KSRC))
-  KSRC := $(firstword $(KSP))
-endif
-
-ifeq (,$(KSRC))
-  $(warning *** Kernel header files not in any of the expected locations.)
-  $(warning *** Install the appropriate kernel development package, e.g.)
-  $(error kernel-devel, for building kernel modules and try again)
-else
-ifeq (/lib/modules/$(BUILD_KERNEL)/source, $(KSRC))
-  KOBJ :=  /lib/modules/$(BUILD_KERNEL)/build
-else
-  KOBJ :=  $(KSRC)
-endif
-endif
-
-# Version file Search Path
-VSP :=  $(KOBJ)/include/generated/utsrelease.h \
-        $(KOBJ)/include/linux/utsrelease.h \
-        $(KOBJ)/include/linux/version.h \
-        $(KOBJ)/include/generated/uapi/linux/version.h \
-        /boot/vmlinuz.version.h
-
-# Config file Search Path
-CSP :=  $(KOBJ)/include/generated/autoconf.h \
-        $(KOBJ)/include/linux/autoconf.h \
-        /boot/vmlinuz.autoconf.h
-
-# prune the lists down to only files that exist
-test_file = $(shell [ -f $(file) ] && echo $(file))
-VSP := $(foreach file, $(VSP), $(test_file))
-CSP := $(foreach file, $(CSP), $(test_file))
-
-# and use the first valid entry in the Search Paths
-ifeq (,$(VERSION_FILE))
-  VERSION_FILE := $(firstword $(VSP))
-endif
-ifeq (,$(CONFIG_FILE))
-  CONFIG_FILE := $(firstword $(CSP))
-endif
-
-ifeq (,$(wildcard $(VERSION_FILE)))
-  $(error Linux kernel source not configured - missing version header file)
-endif
-
-ifeq (,$(wildcard $(CONFIG_FILE)))
-  $(error Linux kernel source not configured - missing autoconf.h)
-endif
-
-# Some helper functions for converting kernel version to version codes
-get_kver = $(or $(word ${2},$(subst ., ,${1})),0)
-get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \
-                       [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \
-                       [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \
-                       printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) )
-
-# Convert LINUX_VERSION into LINUX_VERSION_CODE
-ifneq (${LINUX_VERSION},)
-  LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3))
-endif
-
-# Honor LINUX_VERSION_CODE
-ifneq (${LINUX_VERSION_CODE},)
-  $(info Setting LINUX_VERSION_CODE to ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}))
-  KVER_CODE := ${LINUX_VERSION_CODE}
-  EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE}
-endif
-
-EXTRA_CFLAGS += $(CFLAGS_EXTRA)
-
-# get the kernel version - we use this to find the correct install path
-KVER := $(shell $(CC) $(EXTRA_CFLAGS) -E -dM $(VERSION_FILE) | grep UTS_RELEASE | \
-        awk '{ print $$3 }' | sed 's/\"//g')
-
-# assume source symlink is the same as build, otherwise adjust KOBJ
-ifneq (,$(wildcard /lib/modules/$(KVER)/build))
-ifneq ($(KSRC),$(shell readlink /lib/modules/$(KVER)/build))
-  KOBJ=/lib/modules/$(KVER)/build
-endif
-endif
-
-ifeq (${KVER_CODE},)
-  KVER_CODE := $(shell $(CC) $(EXTRA_CFLAGS) -E -dM $(VSP) 2> /dev/null |\
-                 grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g')
-endif
-
-# set the install path before and after 3.2.0, and handle
-# distros like SLES 11 that backported the directories
-ifeq (1,$(shell [ -d /lib/modules/$(KVER)/kernel/drivers/net/ethernet/intel ] && echo 1 || echo 0))
-INSTDIR := /lib/modules/$(KVER)/kernel/drivers/net/ethernet/intel
-else
-INSTDIR := /lib/modules/$(KVER)/kernel/drivers/net
-endif
-
-# abort the build on kernels older than 2.6.32
-ifneq (1,$(shell [ $(KVER_CODE) -ge 132640 ] && echo 1 || echo 0))
-  $(error *** Aborting the build. \
-          *** This driver is not supported on kernel versions older than 2.6.32)
-endif
-
-MANSECTION = 7
-
-ifeq (,$(MANDIR))
-  # find the best place to install the man page
-  MANPATH := $(shell (manpath 2>/dev/null || echo $MANPATH) | sed 's/:/ /g')
-  ifneq (,$(MANPATH))
-    # test based on inclusion in MANPATH
-    test_dir = $(findstring $(dir), $(MANPATH))
-  else
-    # no MANPATH, test based on directory existence
-    test_dir = $(shell [ -e $(dir) ] && echo $(dir))
-  endif
-  # our preferred install path
-  # should /usr/local/man be in here ?
-  MANDIR := /usr/share/man /usr/man
-  MANDIR := $(foreach dir, $(MANDIR), $(test_dir))
-  MANDIR := $(firstword $(MANDIR))
-endif
-ifeq (,$(MANDIR))
-  # fallback to /usr/man
-  MANDIR := /usr/man
-endif
-
-# kernel build function
-# $1 is the relative path of the subdir to build in
-# $2 is the kernel build target
-kernelbuild = $(shell (\
-               if [ -n "$(KOBJ)" ]; then \
-                       $(MAKE) ccflags-y:="$(CFLAGS_EXTRA)" -C $(KSRC) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=$(realpath $(1)) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) $(2) ; \
-               else \
-                       $(MAKE) ccflags-y:="$(CFLAGS_EXTRA)" -C $(KSRC) -O $(KOBJ) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=$(realpath $(1)) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) $(2) ; \
-               fi > .tmp ; rm .tmp))
-
-
-###########################################################################
-# Build rules
-
-# We can't use the kernelbuild macro in verbose targets because it gobbles the
-# output of the shell.
-
-$(MODULES): $(TARGETS)
-       @cp $(TARGETS) .
-
-$(TARGETS): $(SOURCES)
-       @for s in $(DRIVERS) ; do \
-               if [ -n "$(KOBJ)" ]; then \
-                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s modules ; \
-               else \
-                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) -O $(KOBJ) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s modules ; \
-               fi ; \
-       done
-
-noisy: $(SOURCES)
-       @for s in $(DRIVERS) ; do \
-               if [ -n "$(KOBJ)" ]; then \
-                       $(MAKE) -C $(KSRC) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s V=1 modules ; \
-               else \
-                       $(MAKE) -C $(KSRC) -O $(KOBJ) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s V=1 modules ; \
-               fi ; \
-       done
-       @cp $(TARGETS) .
-
-silent: $(SOURCES)
-       $(foreach d, $(DRIVERS), $(call kernelbuild,$(d),modules))
-       @cp $(TARGETS) .
-
-sparse: clean $(SOURCES)
-       @for s in $(DRIVERS) ; do \
-               if [ -n "$(KOBJ)" ]; then \
-                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s C=2 CF="-D__CHECK_ENDIAN__ -Wbitwise -Wcontext" modules ; \
-               else \
-                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) -O $(KOBJ) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s C=2 CF="-D__CHECK_ENDIAN_ -Wbitwise -Wcontext" modules ; \
-               fi ; \
-       done
-
-ccc: clean $(SOURCES)
-       @for s in $(DRIVERS) ; do \
-               if [ -n "$(KOBJ)" ]; then \
-                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s coccicheck MODE=report; \
-               else \
-                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) -O $(KOBJ) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s coccicheck MODE=report; \
-               fi ; \
-       done
-
-manfile:
-       $(foreach m, $(DRIVERS), $(shell gzip -c ../$(m).$(MANSECTION) > $(m).$(MANSECTION).gz))
-
-clean:
-       $(foreach d, $(DRIVERS), $(call kernelbuild,$(d),clean))
-       @-rm -rf *.$(MANSECTION).gz *.ko
-
-install: $(MODULES) manfile
-# remove all old versions of the driver
-       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko))
-       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).gz))
-       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko.xz))
-       $(foreach m, $(DRIVERS), $(shell \
-               install -D -m 644 $(m).$(MANSECTION).gz $(INSTALL_MOD_PATH)$(MANDIR)/man$(MANSECTION)/$(m).$(MANSECTION).gz ; \
-               install -D -m 644 $(m).ko $(INSTALL_MOD_PATH)$(INSTDIR)/$(m)/$(m).ko))
-ifeq (,$(INSTALL_MOD_PATH))
-       @-/sbin/depmod -a $(KVER) || true
-else
-       @-/sbin/depmod -b $(INSTALL_MOD_PATH) -a -n $(KVER) > /dev/null || true
-endif
-
-uninstall:
-       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko))
-       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).gz))
-       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko.xz))
-       @-/sbin/depmod -a $(KVER)
-       $(foreach m, $(DRIVERS), $(shell \
-               if [ -e $(INSTALL_MOD_PATH)$(MANDIR)/man$(MANSECTION)/$(m).$(MANSECTION).gz ] ; then \
-                       rm -f $(INSTALL_MOD_PATH)$(MANDIR)/man$(MANSECTION)/$(m).$(MANSECTION).gz ; \
-               fi))
-
-.PHONY: noisy clean manfile silent sparse ccc install uninstall
-.NOTPARALLEL:
-
diff --git a/i40e-dkms/i40e-1.3.47/src/i40e/Kbuild b/i40e-dkms/i40e-1.3.47/src/i40e/Kbuild
deleted file mode 100644 (file)
index fdcc55f..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-################################################################################
-#
-# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
-#
-# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
-#
-
-obj-m += i40e.o
-
-i40e-objs := i40e_main.o \
-       kcompat.o       \
-       i40e_ethtool.o  \
-       i40e_adminq.o   \
-       i40e_common.o   \
-       i40e_hmc.o      \
-       i40e_lan_hmc.o  \
-       i40e_nvm.o      \
-       i40e_configfs.o \
-       i40e_debugfs.o  \
-       i40e_diag.o     \
-       i40e_txrx.o     \
-       i40e_ptp.o      \
-       i40e_virtchnl_pf.o
-
-
-i40e-$(CONFIG_DCB) += i40e_dcb.o i40e_dcb_nl.o
-
-i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
diff --git a/i40e-dkms/i40e-1.3.47/src/i40e/i40e_configfs.c b/i40e-dkms/i40e-1.3.47/src/i40e/i40e_configfs.c
deleted file mode 100644 (file)
index fb260b6..0000000
+++ /dev/null
@@ -1,357 +0,0 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
-
-
-#include <linux/configfs.h>
-#include "i40e.h"
-
-#if IS_ENABLED(CONFIG_CONFIGFS_FS)
-
-/**
- * configfs structure for i40e
- *
- * This file adds code for configfs support for the i40e driver.  This sets
- * up a filesystem under /sys/kernel/config in which configuration changes
- * can be made for the driver's netdevs.
- *
- * The initialization in this code creates the "i40e" entry in the configfs
- * system.  After that, the user needs to use mkdir to create configurations
- * for specific netdev ports; for example "mkdir eth3".  This code will verify
- * that such a netdev exists and that it is owned by i40e.
- *
- **/
-
-struct i40e_cfgfs_vsi {
-       struct config_item item;
-       struct i40e_vsi *vsi;
-};
-
-static inline struct i40e_cfgfs_vsi *to_i40e_cfgfs_vsi(struct config_item *item)
-{
-       return item ? container_of(item, struct i40e_cfgfs_vsi, item) : NULL;
-}
-
-static struct configfs_attribute i40e_cfgfs_vsi_attr_min_bw = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "min_bw",
-       .ca_mode = S_IRUGO | S_IWUSR,
-};
-
-static struct configfs_attribute i40e_cfgfs_vsi_attr_max_bw = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "max_bw",
-       .ca_mode = S_IRUGO | S_IWUSR,
-};
-
-static struct configfs_attribute i40e_cfgfs_vsi_attr_commit = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "commit",
-       .ca_mode = S_IRUGO | S_IWUSR,
-};
-
-static struct configfs_attribute i40e_cfgfs_vsi_attr_port_count = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "ports",
-       .ca_mode = S_IRUGO | S_IWUSR,
-};
-
-static struct configfs_attribute i40e_cfgfs_vsi_attr_part_count = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "partitions",
-       .ca_mode = S_IRUGO | S_IWUSR,
-};
-
-static struct configfs_attribute *i40e_cfgfs_vsi_attrs[] = {
-       &i40e_cfgfs_vsi_attr_min_bw,
-       &i40e_cfgfs_vsi_attr_max_bw,
-       &i40e_cfgfs_vsi_attr_commit,
-       &i40e_cfgfs_vsi_attr_port_count,
-       &i40e_cfgfs_vsi_attr_part_count,
-       NULL,
-};
-
-/**
- * i40e_cfgfs_vsi_attr_show - Show a VSI's NPAR BW partition info
- * @item: A pointer back to the configfs item created on driver load
- * @attr: A pointer to this item's configuration attribute
- * @page: A pointer to the output buffer
- **/
-static ssize_t i40e_cfgfs_vsi_attr_show(struct config_item *item,
-                                     struct configfs_attribute *attr,
-                                     char *page)
-{
-       struct i40e_cfgfs_vsi *i40e_cfgfs_vsi = to_i40e_cfgfs_vsi(item);
-       struct i40e_pf *pf = i40e_cfgfs_vsi->vsi->back;
-       ssize_t count;
-
-       if (i40e_cfgfs_vsi->vsi != pf->vsi[pf->lan_vsi])
-               return 0;
-
-       if (strncmp(attr->ca_name, "min_bw", 6) == 0)
-               count = sprintf(page, "%s %s %d%%\n",
-                               i40e_cfgfs_vsi->vsi->netdev->name,
-                               (pf->min_bw & I40E_ALT_BW_RELATIVE_MASK) ?
-                               "Relative Min BW" : "Absolute Min BW",
-                               pf->min_bw & I40E_ALT_BW_VALUE_MASK);
-       else if (strncmp(attr->ca_name, "max_bw", 6) == 0)
-               count = sprintf(page, "%s %s %d%%\n",
-                               i40e_cfgfs_vsi->vsi->netdev->name,
-                               (pf->max_bw & I40E_ALT_BW_RELATIVE_MASK) ?
-                               "Relative Max BW" : "Absolute Max BW",
-                               pf->max_bw & I40E_ALT_BW_VALUE_MASK);
-       else if (strncmp(attr->ca_name, "ports", 5) == 0)
-               count = sprintf(page, "%d\n",
-                               pf->hw.num_ports);
-       else if (strncmp(attr->ca_name, "partitions", 10) == 0)
-               count = sprintf(page, "%d\n",
-                               pf->hw.num_partitions);
-       else
-               return 0;
-
-       return count;
-}
-
-/**
- * i40e_cfgfs_vsi_attr_store - Show a VSI's NPAR BW partition info
- * @item: A pointer back to the configfs item created on driver load
- * @attr: A pointer to this item's configuration attribute
- * @page: A pointer to the user input buffer holding the user input values
- **/
-static ssize_t i40e_cfgfs_vsi_attr_store(struct config_item *item,
-                                      struct configfs_attribute *attr,
-                                      const char *page, size_t count)
-{
-       struct i40e_cfgfs_vsi *i40e_cfgfs_vsi = to_i40e_cfgfs_vsi(item);
-       struct i40e_pf *pf = i40e_cfgfs_vsi->vsi->back;
-       char *p = (char *) page;
-       int rc;
-       unsigned long tmp;
-
-       if (i40e_cfgfs_vsi->vsi != pf->vsi[pf->lan_vsi])
-               return 0;
-
-       if (!p || (*p && (*p == '\n')))
-               return -EINVAL;
-
-       rc = kstrtoul(p, 10, &tmp);
-       if (rc)
-               return rc;
-       if (tmp > 100)
-               return -ERANGE;
-
-       if (strncmp(attr->ca_name, "min_bw", 6) == 0) {
-               if (tmp > (pf->max_bw & I40E_ALT_BW_VALUE_MASK))
-                       return -ERANGE;
-               /* Preserve the valid and relative BW bits - the rest is
-                * don't care.
-                */
-               pf->min_bw &= (I40E_ALT_BW_RELATIVE_MASK |
-                                   I40E_ALT_BW_VALID_MASK);
-               pf->min_bw |= (tmp & I40E_ALT_BW_VALUE_MASK);
-               i40e_set_partition_bw_setting(pf);
-       } else if (strncmp(attr->ca_name, "max_bw", 6) == 0) {
-               if (tmp < 1 ||
-                   tmp < (pf->min_bw & I40E_ALT_BW_VALUE_MASK))
-                       return -ERANGE;
-               /* Preserve the valid and relative BW bits - the rest is
-                * don't care.
-                */
-               pf->max_bw &= (I40E_ALT_BW_RELATIVE_MASK |
-                                   I40E_ALT_BW_VALID_MASK);
-               pf->max_bw |= (tmp & I40E_ALT_BW_VALUE_MASK);
-               i40e_set_partition_bw_setting(pf);
-       } else if (strncmp(attr->ca_name, "commit", 6) == 0 && tmp == 1) {
-               if (i40e_commit_partition_bw_setting(pf))
-                       return -EIO;
-       }
-
-       return count;
-}
-
-/**
- * i40e_cfgfs_vsi_release - Free up the configuration item memory
- * @item: A pointer back to the configfs item created on driver load
- **/
-static void i40e_cfgfs_vsi_release(struct config_item *item)
-{
-       kfree(to_i40e_cfgfs_vsi(item));
-}
-
-static struct configfs_item_operations i40e_cfgfs_vsi_item_ops = {
-       .release                = i40e_cfgfs_vsi_release,
-       .show_attribute         = i40e_cfgfs_vsi_attr_show,
-       .store_attribute        = i40e_cfgfs_vsi_attr_store,
-};
-
-static struct config_item_type i40e_cfgfs_vsi_type = {
-       .ct_item_ops    = &i40e_cfgfs_vsi_item_ops,
-       .ct_attrs       = i40e_cfgfs_vsi_attrs,
-       .ct_owner       = THIS_MODULE,
-};
-
-struct i40e_cfgfs_group {
-       struct config_group group;
-};
-
-/**
- * to_i40e_cfgfs_group - Get the group pointer from the config item
- * @item: A pointer back to the configfs item created on driver load
- **/
-static inline struct i40e_cfgfs_group *
-to_i40e_cfgfs_group(struct config_item *item)
-{
-       return item ? container_of(to_config_group(item),
-                                  struct i40e_cfgfs_group, group) : NULL;
-}
-
-/**
- * i40e_cfgfs_group_make_item - Create the configfs item with group container
- * @group: A pointer to our configfs group
- * @name: A pointer to the nume of the device we're looking for
- **/
-static struct config_item *
-i40e_cfgfs_group_make_item(struct config_group *group, const char *name)
-{
-       struct i40e_cfgfs_vsi *i40e_cfgfs_vsi;
-       struct net_device *netdev;
-       struct i40e_netdev_priv *np;
-
-       read_lock(&dev_base_lock);
-       netdev = first_net_device(&init_net);
-       while (netdev) {
-               if (strncmp(netdev->name, name, sizeof(netdev->name)) == 0)
-                       break;
-               netdev = next_net_device(netdev);
-       }
-       read_unlock(&dev_base_lock);
-
-       if (!netdev)
-               return ERR_PTR(-ENODEV);
-
-       /* is this netdev owned by i40e? */
-       if (netdev->netdev_ops->ndo_open != i40e_open)
-               return ERR_PTR(-EACCES);
-
-       i40e_cfgfs_vsi = kzalloc(sizeof(struct i40e_cfgfs_vsi), GFP_KERNEL);
-       if (!i40e_cfgfs_vsi)
-               return ERR_PTR(-ENOMEM);
-
-       np = netdev_priv(netdev);
-       i40e_cfgfs_vsi->vsi = np->vsi;
-       config_item_init_type_name(&i40e_cfgfs_vsi->item, name,
-                                  &i40e_cfgfs_vsi_type);
-
-       return &i40e_cfgfs_vsi->item;
-}
-
-static struct configfs_attribute i40e_cfgfs_group_attr_description = {
-       .ca_owner = THIS_MODULE,
-       .ca_name = "description",
-       .ca_mode = S_IRUGO,
-};
-
-static struct configfs_attribute *i40e_cfgfs_group_attrs[] = {
-       &i40e_cfgfs_group_attr_description,
-       NULL,
-};
-
-static ssize_t i40e_cfgfs_group_attr_show(struct config_item *item,
-                                        struct configfs_attribute *attr,
-                                        char *page)
-{
-       return sprintf(page,
-"i40e\n"
-"\n"
-"This subsystem allows the modification of network port configurations.\n"
-"To start, use the name of the network port to be configured in a 'mkdir'\n"
-"command, e.g. 'mkdir eth3'.\n");
-}
-
-static void i40e_cfgfs_group_release(struct config_item *item)
-{
-       kfree(to_i40e_cfgfs_group(item));
-}
-
-static struct configfs_item_operations i40e_cfgfs_group_item_ops = {
-       .release        = i40e_cfgfs_group_release,
-       .show_attribute = i40e_cfgfs_group_attr_show,
-};
-
-/*
- * Note that, since no extra work is required on ->drop_item(),
- * no ->drop_item() is provided.
- */
-static struct configfs_group_operations i40e_cfgfs_group_ops = {
-       .make_item      = i40e_cfgfs_group_make_item,
-};
-
-static struct config_item_type i40e_cfgfs_group_type = {
-       .ct_item_ops    = &i40e_cfgfs_group_item_ops,
-       .ct_group_ops   = &i40e_cfgfs_group_ops,
-       .ct_attrs       = i40e_cfgfs_group_attrs,
-       .ct_owner       = THIS_MODULE,
-};
-
-static struct configfs_subsystem i40e_cfgfs_group_subsys = {
-       .su_group = {
-               .cg_item = {
-                       .ci_namebuf = "i40e",
-                       .ci_type = &i40e_cfgfs_group_type,
-               },
-       },
-};
-
-/**
- * i40e_configfs_init - Initialize configfs support for our driver
- **/
-int i40e_configfs_init(void)
-{
-       int ret;
-       struct configfs_subsystem *subsys;
-
-       subsys = &i40e_cfgfs_group_subsys;
-
-       config_group_init(&subsys->su_group);
-       mutex_init(&subsys->su_mutex);
-       ret = configfs_register_subsystem(subsys);
-       if (ret) {
-               pr_err("Error %d while registering configfs subsystem %s\n",
-                      ret, subsys->su_group.cg_item.ci_namebuf);
-               return ret;
-       }
-
-       return 0;
-}
-
-/**
- * i40e_configfs_init - Bail out - unregister configfs subsystem and release
- **/
-void i40e_configfs_exit(void)
-{
-       configfs_unregister_subsystem(&i40e_cfgfs_group_subsys);
-}
-
-#endif /* CONFIG_CONFIGFS_FS */
similarity index 66%
rename from i40e-dkms/i40e-1.3.47/README
rename to i40e-dkms/i40e-1.5.18/README
index 9e82a6110467e638b8fdbbd0b2d2b8fa51f5c844..dd645cbd01b830485de7cf89cf37d5c4ddabe4d8 100755 (executable)
@@ -4,7 +4,7 @@ i40e Linux* Base Driver for the Intel(R) XL710 Ethernet Controller Family
 
 ===============================================================================
 
 
 ===============================================================================
 
-September 25, 2015
+April 6, 2016
 
 ===============================================================================
 
 
 ===============================================================================
 
@@ -26,6 +26,26 @@ Contents
 Important Notes
 ---------------
 
 Important Notes
 ---------------
 
+Enabling a VF link if the port is disconnected
+----------------------------------------------
+
+If the physical function (PF) link is down, you can force link up (from the host
+PF) on any virtual functions (VF) bound to the PF. Note that this requires
+kernel support (Redhat kernel 3.10.0-327 or newer, upstream kernel 3.11.0 or
+newer, and associated iproute2 user space support). If the following command
+does not work, it may not be supported by your system. The following command
+forces link up on VF 0 bound to PF eth0:
+  ip link set eth0 vf 0 state enable
+
+
+Do not unload port driver if VF with active VM is bound to it
+-------------------------------------------------------------
+
+Do not unload a port's driver if a Virtual Function (VF) with an active Virtual
+Machine (VM) is bound to it. Doing so will cause the port to appear to hang.
+Once the VM shuts down, or otherwise releases the VF, the command will complete.
+
+
 Configuring SR-IOV for improved network security
 ------------------------------------------------
 
 Configuring SR-IOV for improved network security
 ------------------------------------------------
 
@@ -43,12 +63,13 @@ frames to be dropped.
 Overview
 --------
 
 Overview
 --------
 
-This document describes the i40e Linux* Base Driver for the XL710 Ethernet Controller Family of Adapters.
+This driver supports kernel versions 2.6.32 and newer.
 
 
-The Linux* base driver supports the following kernel versions:
-2.6.32 and newer
+It supports Linux supported x86_64 systems.
 
 
-It includes support for Linux supported x86_64 systems.
+Driver information can be obtained using ethtool, lspci, and ifconfig.
+Instructions on updating ethtool can be found in the section Additional
+Configurations later in this document.
 
 This driver is only supported as a loadable module at this time. Intel is
 not supplying patches against the kernel source to allow for static linking of
 
 This driver is only supported as a loadable module at this time. Intel is
 not supplying patches against the kernel source to allow for static linking of
@@ -62,26 +83,24 @@ The following features are now available in supported kernels:
 - Native VLANs
 - Channel Bonding (teaming)
 - SNMP
 - Native VLANs
 - Channel Bonding (teaming)
 - SNMP
-- Generic Receive Offload
 
 Adapter teaming is implemented using the native Linux Channel bonding
 module. This is included in supported Linux kernels.
 
 Adapter teaming is implemented using the native Linux Channel bonding
 module. This is included in supported Linux kernels.
+
 Channel Bonding documentation can be found in the Linux kernel source:
 /documentation/networking/bonding.txt
 
 The driver information previously displayed in the /proc file system is not
 supported in this release.
 
 Channel Bonding documentation can be found in the Linux kernel source:
 /documentation/networking/bonding.txt
 
 The driver information previously displayed in the /proc file system is not
 supported in this release.
 
-Driver information can be obtained using ethtool, lspci, and ifconfig.
-Instructions on updating ethtool can be found in the section Additional 
-Configurations later in this document.
-
 
 
 Identifying Your Adapter
 ------------------------
 
 
 Identifying Your Adapter
 ------------------------
-The driver in this release is compatible with XL710 and X710-based Intel
-Ethernet Network Connections.
+The driver in this release is compatible with devices based on the following:
+  * Intel(R) Ethernet Controller X710
+  * Intel(R) Ethernet Controller XL710
+  * Intel(R) Ethernet Controller X722
 
 For information on how to identify your adapter, go to the Adapter &
 Driver ID Guide at:
 
 For information on how to identify your adapter, go to the Adapter &
 Driver ID Guide at:
@@ -118,7 +137,14 @@ Avago              1000BASE-T      SFP ABCU-5710RZ
 
 QSFP+ Modules
 -------------
 
 QSFP+ Modules
 -------------
+NOTE: Intel branded network adapters based on the X710/XL710 controller
+  (for example, Intel(R) Ethernet Converged Network Adapter XL710-Q1) support
+  the E40GQSFPLR module. For other connections based on the X710/XL710
+  controller, support is dependent on your system board. Please see your vendor
+  for details.
+
   Intel        TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed)        E40GQSFPSR
   Intel        TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed)        E40GQSFPSR
+  Intel        TRIPLE RATE 1G/10G/40G QSFP+ LR (bailed)        E40GQSFPLR
     QSFP+ 1G speed is not supported on XL710 based devices.
 
 X710/XL710 Based SFP+ adapters support passive QSFP+ Direct Attach cables.
     QSFP+ 1G speed is not supported on XL710 based devices.
 
 X710/XL710 Based SFP+ adapters support passive QSFP+ Direct Attach cables.
@@ -156,7 +182,7 @@ NOTES:
 4. Compile the driver module:
    # make install
    The binary will be installed as:
 4. Compile the driver module:
    # make install
    The binary will be installed as:
-   /lib/modules/<KERNEL VERSION>/kernel/drivers/net/i40e/i40e.[k]o
+   /lib/modules/<KERNEL VERSION>/updates/drivers/net/ethernet/intel/i40e/i40e.ko
 
    The install location listed above is the default location. This may differ
    for various Linux distributions.
 
    The install location listed above is the default location. This may differ
    for various Linux distributions.
@@ -220,17 +246,27 @@ max_vfs
 Valid Range:
 1-32 (X710 based devices)
 1-64 (XL710 based devices)
 Valid Range:
 1-32 (X710 based devices)
 1-64 (XL710 based devices)
+
 NOTE: This parameter is only used on kernel 3.7.x and below. On kernel 3.8.x
 and above, use sysfs to enable VFs. For example:
 #echo $num_vf_enabled > /sys/class/net/$dev/device/sriov_numvfs        //enable VFs
 #echo 0 > /sys/class/net/$dev/device/sriov_numvfs      //disable VFs
 NOTE: This parameter is only used on kernel 3.7.x and below. On kernel 3.8.x
 and above, use sysfs to enable VFs. For example:
 #echo $num_vf_enabled > /sys/class/net/$dev/device/sriov_numvfs        //enable VFs
 #echo 0 > /sys/class/net/$dev/device/sriov_numvfs      //disable VFs
+
+The parameters for the driver are referenced by position. Thus, if you have a
+dual port adapter, or more than one adapter in your system, and want N virtual
+functions per port, you must specify a number for each port with each parameter
+separated by a comma. For example:
+  modprobe i40e max_vfs=4,1
+NOTE: Caution must be used in loading the driver with these parameters.
+Depending on your system configuration, number of slots, etc., it is impossible
+to predict in all cases where the positions would be on the command line.
 This parameter adds support for SR-IOV. It causes the driver to spawn up to
 max_vfs worth of virtual functions.
 Some hardware configurations support fewer SR-IOV instances, as the whole
 XL710 controller (all functions) is limited to 128 SR-IOV interfaces in total.
 This parameter adds support for SR-IOV. It causes the driver to spawn up to
 max_vfs worth of virtual functions.
 Some hardware configurations support fewer SR-IOV instances, as the whole
 XL710 controller (all functions) is limited to 128 SR-IOV interfaces in total.
-NOTE: When SR-IOV mode is enabled, hardware VLAN filtering
-and VLAN tag stripping/insertion will remain enabled. Please remove the old
-VLAN filter before the new VLAN filter is added. For example,
+NOTE: When SR-IOV mode is enabled, hardware VLAN
+filtering and VLAN tag stripping/insertion will remain enabled. Please remove
+the old VLAN filter before the new VLAN filter is added. For example,
 ip link set eth0 vf 0 vlan 100 // set vlan 100 for VF 0
 ip link set eth0 vf 0 vlan 0   // Delete vlan 100
 ip link set eth0 vf 0 vlan 200 // set a new vlan 200 for VF 0
 ip link set eth0 vf 0 vlan 100 // set vlan 100 for VF 0
 ip link set eth0 vf 0 vlan 0   // Delete vlan 100
 ip link set eth0 vf 0 vlan 200 // set a new vlan 200 for VF 0
@@ -263,41 +299,111 @@ the first VF on VLAN 10.
 $ ip link set dev eth0 vf 0 vlan 10
 .
 
 $ ip link set dev eth0 vf 0 vlan 10
 .
 
+VLAN Tag Packet Steering
+------------------------
+
+Allows you to send all packets with a specific VLAN tag to a particular
+SR-IOV virtual function (VF). Further, this feature allows you to designate
+a particular VF as trusted, and allows that trusted VF to request selective
+promiscuous mode on the Physical Function (PF).
+
+To set a VF as trusted or untrusted, enter the following command in the
+Hypervisor:
+  # ip link set dev eth0 vf 1 trust [on|off]
+
+Once the VF is designated as trusted, use the following commands in the VM
+to set the VF to promiscuous mode.
+  For promiscuous all:
+  #ip link set eth2 promisc on
+    Where eth2 is a VF interface in the VM
+  For promiscuous Multicast:
+  #ip link set eth2 allmulti on
+    Where eth2 is a VF interface in the VM
+
+    NOTE: By default, the ethtool priv-flag vf-true-promisc-support is set to 
+    "off", meaning that promiscuous mode for the VF will be limited. To set the
+    promiscuous mode for the VF to true promiscuous and allow the VF to see
+    all ingress traffic, use the following command.
+      #ethtool --set-priv-flags p261p1 vf-true-promisc-support on
+    The vf-true-promisc-support priv-flag does not enable promiscuous mode;
+    rather, it designates which type of promiscuous mode (limited or true)
+    you will get when you enable promiscuous mode using the ip link commands 
+    above. Note that this is a global setting that affects the entire device.
+    However, the vf-true-promisc-support priv-flag is only exposed to the first
+    PF of the device. The PF remains in limited promiscuous mode (unless it
+    is in MFP mode) regardless of the vf-true-promisc-support setting.
+
+Now add a VLAN interface on the VF interface.
+  #ip link add link eth2 name eth2.100 type vlan id 100
+
+Note that the order in which you set the VF to promiscuous mode and add
+the VLAN interface does not matter (you can do either first). The end result
+in this example is that the VF will get all traffic that is tagged with
+VLAN 100.
+
+
 Intel(R) Ethernet Flow Director
 -------------------------------
 Intel(R) Ethernet Flow Director
 -------------------------------
+NOTE: Flow director parameters are only supported on kernel versions 2.6.30 or
+newer.
+
 The Flow Director performs the following tasks:
 
   - Directs receive packets according to their flows to different queues.
   - Enables tight control on routing a flow in the platform.
   - Matches flows and CPU cores for flow affinity.
   - Supports multiple parameters for flexible flow classification and load
 The Flow Director performs the following tasks:
 
   - Directs receive packets according to their flows to different queues.
   - Enables tight control on routing a flow in the platform.
   - Matches flows and CPU cores for flow affinity.
   - Supports multiple parameters for flexible flow classification and load
-    balancing.
+    balancing (in SFP mode only).
 
 NOTES:
 
 
 NOTES:
 
-  - The Flow Director is enabled only if the kernel supports multiple
-    transmit queues.
   - An included script (set_irq_affinity) automates setting the IRQ to
     CPU affinity.
   - An included script (set_irq_affinity) automates setting the IRQ to
     CPU affinity.
-  - The i40e Linux driver does not support configuration of the mask field.
-    It only accepts rules that completely qualify a certain flow type.
+  - The Linux i40e driver supports the following flow types: IPv4, TCPv4, and
+    UDPv4. For a given flow type, it supports valid combinations of
+    IP addresses (source or destination) and UDP/TCP ports (source and 
+    destination). For example, you can supply only a source IP address,
+    a source IP address and a destination port, or any combination of one or
+    more of these four parameters.
+  - The Linux i40e driver allows you to filter traffic based on a user-defined
+    flexible two-byte pattern and offset by using the ethtool user-def and
+    mask fields. Only L3 and L4 flow types are supported for user-defined 
+    flexible filters. For a given flow type, you must clear all Flow Director
+    filters before changing the input set (for that flow type).
 
 ethtool commands:
 
 
 ethtool commands:
 
-  - To enable or disable the Flow Director
+  - To enable or disable the Flow Director:
 
        # ethtool -K ethX ntuple <on|off>
 
 
        # ethtool -K ethX ntuple <on|off>
 
-       When disabling ntuple filters all the user programed filters are flushed
-       from the driver cache and hardware. Filters must be re-added if they are
-       needed when ntuple is re-enabled.
+       When disabling ntuple filters, all the user programmed filters are
+       flushed from the driver cache and hardware. All needed filters must
+       be re-added when ntuple is re-enabled.
 
 
-  - To add a filter that directs packet to queue 2, use -U or -N switch
+  - To add a filter that directs packet to queue 2, use -U or -N switch:
 
        # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
        192.168.10.2 src-port 2000 dst-port 2001 action 2 [loc 1]
 
 
        # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
        192.168.10.2 src-port 2000 dst-port 2001 action 2 [loc 1]
 
-  - To see the list of filters currently present
+   To set a filter using only the source and destination IP address:
+
+       # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
+       192.168.10.2 action 2 [loc 1]
+
+   To set a filter based on a user defined pattern and offset:
+
+       # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
+       192.168.10.2 user-def 0xffffffff00000001 m 0x40 action 2 [loc 1]
+
+       where the value of the user-def field (0xffffffff00000001) is the
+       pattern and m 0x40 is the offset.
+
+       Note that in this case the mask (m 0x40) parameter is used with the
+       user-def field, whereas for cloud filter support the mask parameter
+       is not used.
+
+  - To see the list of filters currently present:
        # ethtool <-u|-n> ethX
 
 Application Targeted Routing (ATR) Perfect Filters
        # ethtool <-u|-n> ethX
 
 Application Targeted Routing (ATR) Perfect Filters
@@ -352,6 +458,108 @@ sideband filter list replay. This will help flush any stale ATR rules and
 create space.
 
 
 create space.
 
 
+Cloud Filter Support
+--------------------
+On a complex network that supports multiple types of traffic (such as for
+storage as well as cloud), cloud filter support allows you to send one type of
+traffic (for example, the storage traffic) to the Physical Function (PF) and
+another type (say, the cloud traffic) to a Virtual Function (VF). Because cloud
+networks are typically VXLAN/Geneve-based, you can define a cloud filter to
+identify VXLAN/Geneve packets and send them to a queue in the VF to be
+processed by the virtual machine (VM). Similarly, other cloud filters can be
+designed for various other traffic tunneling.
+
+NOTES:
+  - Cloud filters are only supported when the underlying device is in Single
+    Function per Port mode.
+  - The "action -1" option, which drops matching packets in regular Flow
+    Director filters, is not available to drop packets when used with 
+    cloud filters.
+  - For IPv4 and ether flow-types, cloud filters cannot be used for TCP or
+    UDP filters.
+  - Cloud filters can be used as a method for implementing queue splitting in
+    the PF.
+
+The following filters are supported:
+  Cloud Filters
+    Inner MAC, Inner VLAN (for NVGRE, VXLAN or Geneve packets)
+    Inner MAC, Inner VLAN, Tenant ID (for NVGRE, VXLAN or Geneve packets)
+    Inner MAC, Tenant ID (NVGRE packet or VXLAN/Geneve packets)
+    Outer MAC L2 filter
+    Inner MAC filter
+    Outer MAC, Tenant ID, Inner MAC
+    Application Destination IP
+    Application Source-IP, Inner MAC
+    ToQueue: Use MAC, VLAN to point to a queue
+  L3 filters
+    Application Destination IP
+
+Use ethtool’s flow director and user defined (user-def) options to define
+cloud filters for tunneled packets (VF) and L3 filters for non-tunneled
+packets (PF or VF). In this case, the user-def field specifies that a cloud
+filter is being programmed instead of a Flow Director filter. Note that this
+is not the same as setting filters using a user-defined pattern and offset,
+which requires using the mask ('m') parameter in conjunction with the user-def
+field (see the Intel Ethernet Flow Director section in this document).
+
+For regular Flow Director filters:
+
+  - No user-def specified or upper 32 bits of user-def is all 0s
+
+  Example:
+
+    ethtool -N p258p1 flow-type ip4 src-ip 192.168.1.108 dst-ip 192.168.1.109 /
+    action 6 loc 3
+
+For L3 filters (non-tunneled packets):
+
+  - “user-def 0xffffffff00000002” (no Tenant ID/VNI specified in the upper
+    32 bits of the user-def field and send to VF id 2)
+  - Only L3 parameters (src-IP, dst-IP) are considered
+
+  Example:
+
+    ethtool -N p4p2 flow-type ip4 src-ip 192.168.42.13 dst-ip 192.168.42.33 /
+    src-port 12344 dst-port 12344 user-def 0xffffffff00000001 loc 3
+      Redirect traffic coming from 192.168.42.13 port 12344 with destination
+      192.168.42.33 port 12344 into VF id 1, and call this “rule 3”
+
+For cloud filters (tunneled packets):
+
+  - All other filters, including where Tenant ID/VNI is specified.
+  - The upper 32 bits of the user def field can carry the tenant ID/VNI
+    if specified or required.
+  - The lower 32 bits of the 'user-def' field can be used to specify the
+    VF ID. If the ID is greater than the maximum number of VFs currently
+    enabled then the ID will default back to the main VSI.
+  - Cloud filters can be defined with inner MAC, outer MAC, inner IP address,
+     inner VLAN, and VNI as part of the cloud tuple. Cloud filters filter on
+    destination (not source) MAC and IP. The destination and source MAC
+    address fields in the ethtool command are overloaded as dst = outer,
+    src = inner MAC address to facilitate tuple definition for a cloud filter.
+  - The 'loc' parameter specifies the rule number of the filter as being
+    stored in the base driver
+
+  Example:
+
+    ethtool -N p4p2 flow-type ip4 src-ip 192.168.42.13 dst-ip 192.168.42.33 /
+    src-port 12344 dst-port 12344 user-def 0x2200000001 loc 38
+      Redirect traffic on VXLAN using tunnel id 34 (hex 0x22) coming from
+      192.168.42.13 port 12344 with destination 192.168.42.33 port 12344 into
+      VF id 1, and call this “rule 38”
+      NOTE: If the VF id given is larger than the number of active VFs (e.g.
+      if you set num_vfs to 8 and use VF id 12 in the ethtool command) the
+      traffic will be redirected to the PF rather than to the VF.
+
+To see the list of filters currently present:
+    ethtool <-u|-n> ethX
+      NOTE: For cloud filters in which the specified VF is greater than
+      the number of VFs supported, the cloud filter will send traffic
+      to the PF. However, the driver does not store the specified VF
+      number, so in this case the ethtool -n command will display
+      0xffff for the VF number.
+
+
 ================================================================================
 
 
 ================================================================================
 
 
@@ -438,7 +646,7 @@ NAPI
 ----
 NAPI (Rx polling mode) is supported in the i40e driver.
 For more information on NAPI, see
 ----
 NAPI (Rx polling mode) is supported in the i40e driver.
 For more information on NAPI, see
-ftp://robur.slu.se/pub/Linux/net-development/NAPI/usenix-paper.tgz.
+https://www.linuxfoundation.org/collaborate/workgroups/networking/napi
 
 
 Flow Control
 
 
 Flow Control
@@ -460,6 +668,26 @@ ethtool -A eth? autoneg off rx on tx on
 NOTE: You must have a flow control capable link partner.
 
 
 NOTE: You must have a flow control capable link partner.
 
 
+RSS Hash Flow
+-------------
+
+Allows you to set the hash bytes per flow type and any combination of one or
+more options for Receive Side Scaling (RSS) hash byte configuration.
+
+#ethtool -N <dev> rx-flow-hash <type> <option>
+
+Where <type> is:
+  tcp4 signifying TCP over IPv4
+  udp4 signifying UDP over IPv4
+  tcp6 signifying TCP over IPv6
+  udp6 signifying UDP over IPv6
+And <option> is one or more of:
+  s    Hash on the IP source address of the rx packet.
+  d    Hash on the IP destination address of the rx packet.
+  f    Hash on bytes 0 and 1 of the Layer 4 header of the rx packet.
+  n    Hash on bytes 2 and 3 of the Layer 4 header of the rx packet.
+
+
 MAC and VLAN anti-spoofing feature
 ----------------------------------
 
 MAC and VLAN anti-spoofing feature
 ----------------------------------
 
@@ -469,42 +697,42 @@ NOTE: This feature can be disabled for a specific Virtual Function (VF).
 ip link set <pf dev> vf <vf id> spoofchk {off|on}
 
 
 ip link set <pf dev> vf <vf id> spoofchk {off|on}
 
 
-Support for UDP RSS
--------------------
-
-This feature adds an ON/OFF switch for hashing over certain flow types. Only
-UDP can be turned on. The default setting is enabled .
-
-
 IEEE 1588 Precision Time Protocol (PTP) Hardware Clock (PHC)
 ------------------------------------------------------------
 
 Precision Time Protocol (PTP) is used to synchronize clocks in a computer
 network and is supported in the i40e driver.
 
 IEEE 1588 Precision Time Protocol (PTP) Hardware Clock (PHC)
 ------------------------------------------------------------
 
 Precision Time Protocol (PTP) is used to synchronize clocks in a computer
 network and is supported in the i40e driver.
 
-I40E_PTP is a compile time flag. The user can enable it at compile time to add
-support for PTP from the driver. The flag is used by editing the make file
-as follows when it is being compiled:
-
->make CFLAGS_EXTRA="-DI40E_PTP" install
 
 
 VXLAN Overlay HW Offloading
 ---------------------------
 
 
 
 VXLAN Overlay HW Offloading
 ---------------------------
 
-VXLAN Overlay HW Offloading is enabled by default. The i40e Linux driver
-features VXLAN Overlay HW Offloading support. To view and configure
-VXLAN on a VXLAN-overlay offload enabled device, use the following
-commands:
+Virtual Extensible LAN (VXLAN) allows you to extend an L2 network over an L3
+network, which may be useful in a virtualized or cloud environment. Some Intel(R)
+Ethernet Network devices perform VXLAN processing, offloading it from the
+operating system. This reduces CPU utilization.
+VXLAN offloading is controlled by the tx and rx checksum offload options
+provided by ethtool. That is, if tx checksum offload is enabled, and the adapter
+has the capability, VXLAN offloading is also enabled. If rx checksum offload is
+enabled, then the VXLAN packets rx checksum will be offloaded, unless the module
+parameter vxlan_rx=0,0 was used to specifically disable the VXLAN rx offload.
+VXLAN Overlay HW Offloading is enabled by default. To view and configure VXLAN
+on a VXLAN-overlay offload enabled device, use the following
+command:
 
   # ethtool -k ethX
    (This command displays the offloads and their current state.)
 
   # ethtool -k ethX
    (This command displays the offloads and their current state.)
-  # ethtool -K ethX tx-udp_tnl-segmentation [off|on]
-   (This enables/disables VXLAN support in the driver.)
 
 
-For more information on configuring your network for VXLAN overlay support,
-refer to the Intel Technical Brief, "Creating Overlay Networks Using Intel
-Ethernet Converged Network Adapters" (Intel Networking Division, August 2013):
+i40e support for VXLAN HW offloading is dependent on
+kernel support of the HW offloading features.
+
+For more information on configuring your network for overlay HW offloading
+support, refer to the Intel Technical Brief, "Creating Overlay Networks
+Using Intel Ethernet Converged Network Adapters" (Intel Networking Division,
+August 2013):
 
 http://www.intel.com/content/dam/www/public/us/en/documents/technology-briefs/
 overlay-networks-using-converged-network-adapters-brief.pdf
 
 http://www.intel.com/content/dam/www/public/us/en/documents/technology-briefs/
 overlay-networks-using-converged-network-adapters-brief.pdf
@@ -544,12 +772,14 @@ bandwidth allocations on each function as follows:
 
 3. Mount /config
 4. Load (or reload) the i40e driver
 
 3. Mount /config
 4. Load (or reload) the i40e driver
-5. Make a new directory under config for each partition upon which you wish
-   to configure the bandwidth.
-6. Three files will appear under the config/partition directory:
+5. Make a new directory under config/i40e for each partition upon which you
+   wish to configure the bandwidth.
+6. The following files will appear under the config/partition directory:
    - max_bw
    - min_bw
    - commit
    - max_bw
    - min_bw
    - commit
+   - ports
+   - partitions
   read from max_bw to display the current maximum bandwidth setting.
    write to max_bw to set the maximum bandwidth for this function.
    read from min_bw to display the current minimum bandwidth setting.
   read from max_bw to display the current maximum bandwidth setting.
    write to max_bw to set the maximum bandwidth for this function.
    read from min_bw to display the current minimum bandwidth setting.
@@ -642,33 +872,48 @@ Performance Optimization:
 -------------------------
 
 Driver defaults are meant to fit a wide variety of workloads, but if further
 -------------------------
 
 Driver defaults are meant to fit a wide variety of workloads, but if further
-optimization is required we recommend experimenting with the following settings.
+optimization is required we recommend experimenting with the following
+settings.
 
 Pin the adapter's IRQs to specific cores by disabling the irqbalance service
 
 Pin the adapter's IRQs to specific cores by disabling the irqbalance service
-and using the included set_irq_affinity script.
-The following settings will distribute the IRQs across all the cores evenly:
-  # scripts/set_irq_affinity -x all <interface1> , [ <interface2>, ... ]
-The following settings will distribute the IRQs across all the cores that are
-local to the adapter (same NUMA node):
-  # scripts/set_irq_affinity -x local <interface1> ,[ <interface2>, ... ]
-Please see the script's help text for further options.
+and using the included set_irq_affinity script. Please see the script's help
+text for further options.
+
+  - The following settings will distribute the IRQs across all the cores
+    evenly:
+
+    # scripts/set_irq_affinity -x all <interface1> , [ <interface2>, ... ]
+
+  - The following settings will distribute the IRQs across all the cores that
+    are local to the adapter (same NUMA node):
+
+    # scripts/set_irq_affinity -x local <interface1> ,[ <interface2>, ... ]
 
 For very CPU intensive workloads, we recommend pinning the IRQs to all cores.
 
 For very CPU intensive workloads, we recommend pinning the IRQs to all cores.
+
 For IP Forwarding: Disable Adaptive ITR and lower rx and tx interrupts per
 queue using ethtool.
 For IP Forwarding: Disable Adaptive ITR and lower rx and tx interrupts per
 queue using ethtool.
-# ethtool <interface> adaptive-rx off adaptive-tx off rx-usecs 125 tx-usecs 125
-Setting rx-usecs and tx-usecs to 125 will limit interrupts to about 8000
-interrupts per second per queue.
+
+  - Setting rx-usecs and tx-usecs to 125 will limit interrupts to about 8000
+    interrupts per second per queue.
+
+    # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 125 \
+    tx-usecs 125
 
 For lower CPU utilization: Disable Adaptive ITR and lower rx and tx interrupts
 per queue using ethtool.
 
 For lower CPU utilization: Disable Adaptive ITR and lower rx and tx interrupts
 per queue using ethtool.
-# ethtool <interface> adaptive-rx off adaptive-tx off rx-usecs 250 tx-usecs 250
-Setting rx-usecs and tx-usecs to 250 will limit interrupts to about 4000
-interrupts per second per queue.
+
+  - Setting rx-usecs and tx-usecs to 250 will limit interrupts to about 4000
+    interrupts per second per queue.
+
+    # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 250 \
+    tx-usecs 250
 
 For lower latency: Disable Adaptive ITR and ITR by setting rx and tx to 0
 using ethtool.
 
 For lower latency: Disable Adaptive ITR and ITR by setting rx and tx to 0
 using ethtool.
-# ethtool <interface> adaptive-rx off adaptive-tx off rx-usecs 0 tx-usecs 0
+
+    # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 0 \
+    tx-usecs 0
 
 
 ================================================================================
 
 
 ================================================================================
@@ -678,6 +923,32 @@ Known Issues/Troubleshooting
 ----------------------------
 
 
 ----------------------------
 
 
+Fixing Performance Issues When Using IOMMU in Virtualized Environments
+----------------------------------------------------------------------
+The IOMMU feature of the processor prevents I/O devices from accessing memory
+outside the boundaries set by the OS. It also allows devices to be directly
+assigned to a Virtual Machine. However, IOMMU may affect performance, both
+in latency (each DMA access by the device must be translated by the IOMMU)
+and in CPU utilization (each buffer assigned to every device must be mapped
+in the IOMMU).
+
+If you experience significant performance issues with IOMMU, try using it in
+"passthrough" mode by adding the following to the kernel boot command line:
+  intel_iommu=on iommu=pt
+
+NOTE: This mode enables remapping for assigning devices to VMs, providing
+near-native I/O performance, but does not provide the additional memory
+protection.
+
+
+Transmit hangs leading to no traffic
+------------------------------------
+
+Disabling flow control while the device is under stress may cause tx hangs and
+eventually lead to the device no longer passing traffic. You must reboot the
+system to resolve this issue.
+
+
 Incomplete messages in the system log
 -------------------------------------
 
 Incomplete messages in the system log
 -------------------------------------
 
@@ -695,6 +966,13 @@ When passing non-UDP traffic over a VxLAN interface, the port.rx_csum_bad
 counter increments for the packets.
 
 
 counter increments for the packets.
 
 
+Statistic counters reset when promiscuous mode is changed
+---------------------------------------------------------
+
+Changing promiscuous mode triggers a reset of the physical function driver.
+This will reset the statistic counters.
+
+
 Virtual machine does not get link
 ---------------------------------
 
 Virtual machine does not get link
 ---------------------------------
 
@@ -716,13 +994,6 @@ static MAC address in the host machine. This static MAC address will survive
 a VF driver reload.
 
 
 a VF driver reload.
 
 
-Enabling TSO may cause data integrity issues
---------------------------------------------
-
-Enabling TSO on kernel 3.14 or newer may cause data integrity issues.
-Kernel 3.10 and older do not exhibit this behavior.
-
-
 Changing the number of Rx or Tx queues with ethtool -L may cause a kernel panic
 -------------------------------------------------------------------------------
 
 Changing the number of Rx or Tx queues with ethtool -L may cause a kernel panic
 -------------------------------------------------------------------------------
 
@@ -889,10 +1160,10 @@ For general information, go to the Intel support website at:
 www.intel.com/support/
 
 or the Intel Wired Networking project hosted by Sourceforge at:
 www.intel.com/support/
 
 or the Intel Wired Networking project hosted by Sourceforge at:
-http://sourceforge.net/projects/i40e
+http://sourceforge.net/projects/e1000
 If an issue is identified with the released source code on a supported
 kernel with a supported adapter, email the specific information related to the
 If an issue is identified with the released source code on a supported
 kernel with a supported adapter, email the specific information related to the
-issue to i40e-devel@lists.sf.net.
+issue to e1000-devel@lists.sf.net.
 
 
 ================================================================================
 
 
 ================================================================================
@@ -916,9 +1187,7 @@ St - Fifth Floor, Boston, MA 02110-1301 USA.
 The full GNU General Public License is included in this distribution in the
 file called "COPYING".
 
 The full GNU General Public License is included in this distribution in the
 file called "COPYING".
 
-Intel(R) XL710/X710 Network Driver
-Intel(R) XL710/X710 Virtual Function Network Driver
-Copyright(c) 2014-2015 Intel Corporation.
+Copyright(c) 2014-2016 Intel Corporation.
 ================================================================================
 
 
 ================================================================================
 
 
diff --git a/i40e-dkms/i40e-1.5.18/SUMS b/i40e-dkms/i40e-1.5.18/SUMS
new file mode 100644 (file)
index 0000000..290f44f
--- /dev/null
@@ -0,0 +1,46 @@
+36453     5 i40e-1.5.18/pci.updates
+33977     7 i40e-1.5.18/scripts/set_irq_affinity
+20875     2 i40e-1.5.18/scripts/dump_tables
+05363     3 i40e-1.5.18/i40e.7
+24081     6 i40e-1.5.18/src/i40e_diag.c
+09578    46 i40e-1.5.18/src/i40e_nvm.c
+64861     6 i40e-1.5.18/src/i40e_virtchnl_pf.h
+22166     6 i40e-1.5.18/src/i40e_lan_hmc.h
+48944    84 i40e-1.5.18/src/i40e_debugfs.c
+14633     9 i40e-1.5.18/src/i40e_dcb_nl.c
+58183     4 i40e-1.5.18/src/i40e_fcoe.h
+63581    11 i40e-1.5.18/src/i40e_hmc.c
+29827    72 i40e-1.5.18/src/i40e_adminq_cmd.h
+10518    37 i40e-1.5.18/src/i40e_dcb.c
+27208    30 i40e-1.5.18/src/i40e_adminq.c
+17176     8 i40e-1.5.18/src/common.mk
+54442    78 i40e-1.5.18/src/i40e_virtchnl_pf.c
+63972     8 i40e-1.5.18/src/i40e_hmc.h
+27994     7 i40e-1.5.18/src/Makefile
+51547    23 i40e-1.5.18/src/i40e_ptp.c
+59117     2 i40e-1.5.18/src/i40e_diag.h
+17075    47 i40e-1.5.18/src/i40e_fcoe.c
+30612    55 i40e-1.5.18/src/i40e_type.h
+21794    54 i40e-1.5.18/src/kcompat.c
+44588     1 i40e-1.5.18/src/Module.supported
+52213    24 i40e-1.5.18/src/i40e_prototype.h
+20223   194 i40e-1.5.18/src/i40e_common.c
+07981     3 i40e-1.5.18/src/i40e_alloc.h
+06064   364 i40e-1.5.18/src/i40e_register.h
+10029    16 i40e-1.5.18/src/i40e_txrx.h
+49924   145 i40e-1.5.18/src/kcompat.h
+33326   135 i40e-1.5.18/src/i40e_ethtool.c
+61620    91 i40e-1.5.18/src/i40e_txrx.c
+20253     5 i40e-1.5.18/src/i40e_adminq.h
+56064     7 i40e-1.5.18/src/i40e_dcb.h
+63399    41 i40e-1.5.18/src/i40e_lan_hmc.c
+60171     4 i40e-1.5.18/src/i40e_status.h
+60497    36 i40e-1.5.18/src/i40e.h
+43933   334 i40e-1.5.18/src/i40e_main.c
+57941     4 i40e-1.5.18/src/i40e_helper.h
+23798    14 i40e-1.5.18/src/i40e_virtchnl.h
+32236     2 i40e-1.5.18/src/i40e_devids.h
+58990     5 i40e-1.5.18/src/i40e_osdep.h
+64761    10 i40e-1.5.18/i40e.spec
+02733    18 i40e-1.5.18/COPYING
+65334    46 i40e-1.5.18/README
similarity index 58%
rename from i40e-dkms/i40e-1.3.47/dkms.conf
rename to i40e-dkms/i40e-1.5.18/dkms.conf
index ee5f5c054b86738ca984f37ae032a3c7a8a0d8b4..0628ac6e2dfd7c38c39374486f53cfcf5cf29bf0 100644 (file)
@@ -1,8 +1,8 @@
-MAKE[0]="make -C src/ KERNELDIR=/lib/modules/${kernelver}/build  BUILD_KERNEL=${kernelver}"
+MAKE[0]="'make' -C src/ KSRC=/lib/modules/${kernelver}/build"
 CLEAN="make -C src/ clean"
 BUILT_MODULE_NAME[0]=i40e
 BUILT_MODULE_LOCATION[0]=src/
 DEST_MODULE_LOCATION[0]="/updates"
 PACKAGE_NAME=i40e-dkms
 CLEAN="make -C src/ clean"
 BUILT_MODULE_NAME[0]=i40e
 BUILT_MODULE_LOCATION[0]=src/
 DEST_MODULE_LOCATION[0]="/updates"
 PACKAGE_NAME=i40e-dkms
-PACKAGE_VERSION=1.3.47
+PACKAGE_VERSION=1.5.18
 REMAKE_INITRD=yes
 REMAKE_INITRD=yes
similarity index 92%
rename from i40e-dkms/i40e-1.3.47/i40e.7
rename to i40e-dkms/i40e-1.5.18/i40e.7
index deab3761bc3b16bc0b503142e6a37fd8d58c2916..7941e8b934a8afc6db96393fd1dd04ec59cd5bab 100755 (executable)
@@ -5,9 +5,10 @@
 .\" * Other names and brands may be claimed as the property of others.
 .\"
 .
 .\" * Other names and brands may be claimed as the property of others.
 .\"
 .
-.TH i40e 1 "February 06, 2015"
+.TH i40e 1 "December 10, 2015"
 .SH NAME
 .SH NAME
-i40e \-This file describes the Linux* Base Driver for the Intel Ethernet Controller XL710 Family of Controllers.
+i40e \-This file describes the Linux* Base Driver
+for the Intel Ethernet Controller XL710 Family of Controllers.
 .SH SYNOPSIS
 .PD 0.4v
 modprobe i40e [<option>=<VAL1>,<VAL2>,...]
 .SH SYNOPSIS
 .PD 0.4v
 modprobe i40e [<option>=<VAL1>,<VAL2>,...]
@@ -54,5 +55,5 @@ For general information, go to the Intel support website at:
 .LP
 If an issue is identified with the released source code on a supported
 kernel with a supported adapter, email the specific information related to the
 .LP
 If an issue is identified with the released source code on a supported
 kernel with a supported adapter, email the specific information related to the
-issue to i40e-devel@lists.sf.net.
+issue to e1000-devel@lists.sf.net.
 .LP
 .LP
similarity index 97%
rename from i40e-dkms/i40e-1.3.47/i40e.spec
rename to i40e-dkms/i40e-1.5.18/i40e.spec
index f85a2a8ecd45c4ea2d724535105a4f85b945ce53..118d3041e7010a678d151b00339733bab997dc28 100644 (file)
@@ -1,6 +1,6 @@
 Name: i40e
 Summary: Intel(R) Ethernet Connection XL710 Linux Driver
 Name: i40e
 Summary: Intel(R) Ethernet Connection XL710 Linux Driver
-Version: 1.3.47
+Version: 1.5.18
 Release: 1
 Source: %{name}-%{version}.tar.gz
 Vendor: Intel Corporation
 Release: 1
 Source: %{name}-%{version}.tar.gz
 Vendor: Intel Corporation
@@ -64,12 +64,12 @@ if [ -d /usr/local/share/%{name} ]; then
        rm -rf /usr/local/share/%{name}
 fi
 
        rm -rf /usr/local/share/%{name}
 fi
 
-# Save old drivers (aka .o and .o.gz)
+# Save old drivers (aka .ko and .ko.gz)
 echo "original pci.ids saved in /usr/local/share/%{name}";
 if [ "%{pcitable}" != "/dev/null" ]; then
        echo "original pcitable saved in /usr/local/share/%{name}";
 fi
 echo "original pci.ids saved in /usr/local/share/%{name}";
 if [ "%{pcitable}" != "/dev/null" ]; then
        echo "original pcitable saved in /usr/local/share/%{name}";
 fi
-for k in $(sed 's/\/lib\/modules\/\([0-9a-zA-Z_\.\-]*\).*/\1/' $FL) ; 
+for k in $(sed 's/\/lib\/modules\/\([0-9a-zA-Z_\.\-\+]*\).*/\1/' $FL) ;
 do
        d_drivers=/lib/modules/$k
        d_usr=/usr/local/share/%{name}/$k
 do
        d_drivers=/lib/modules/$k
        d_usr=/usr/local/share/%{name}/$k
@@ -86,11 +86,11 @@ done
 
 # Add driver link
 for f in $(sed 's/\.new$//' $FL) ; do
 
 # Add driver link
 for f in $(sed 's/\.new$//' $FL) ; do
-       ln -f $f.new $f 
+       ln -f $f.new $f
 done
 
 # Check if kernel version rpm was built on IS the same as running kernel
 done
 
 # Check if kernel version rpm was built on IS the same as running kernel
-BK_LIST=$(sed 's/\/lib\/modules\/\([0-9a-zA-Z_\.\-]*\).*/\1/' $FL)
+BK_LIST=$(sed 's/\/lib\/modules\/\([0-9a-zA-Z_\.\-\+]*\).*/\1/' $FL)
 MATCH=no
 for i in $BK_LIST
 do
 MATCH=no
 for i in $BK_LIST
 do
@@ -374,6 +374,9 @@ fi
 
 uname -r | grep BOOT || /sbin/depmod -a > /dev/null 2>&1 || true
 
 
 uname -r | grep BOOT || /sbin/depmod -a > /dev/null 2>&1 || true
 
+echo "Updating initrd..."
+dracut --force
+
 %preun
 # If doing RPM un-install
 if [ $1 -eq 0 ] ; then
 %preun
 # If doing RPM un-install
 if [ $1 -eq 0 ] ; then
diff --git a/i40e-dkms/i40e-1.5.18/pci.updates b/i40e-dkms/i40e-1.5.18/pci.updates
new file mode 100644 (file)
index 0000000..35ffa1f
--- /dev/null
@@ -0,0 +1,108 @@
+################################################################################
+#
+# Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+# Copyright(c) 2013 - 2016 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+# updates for the system pci.ids file
+#
+# IMPORTANT!  Entries in this list must be sorted as they
+#             would appear in the system pci.ids file.  Entries
+#             are sorted by ven, dev, subven, subdev
+#             (numerical order). Also, no OEM names in strings.
+#             The oem vendor id will sub into the name when
+#             the string is consumed by the tools using it.
+#
+8086  Intel Corporation
+       1572  Ethernet Controller X710 for 10GbE SFP+
+               1028 0000  Ethernet 10G X710 rNDC
+               1028 1f99  Ethernet 10G 4P X710/I350 rNDC
+               1028 1f9c  Ethernet 10G 4P X710 SFP+ rNDC
+               103c 0000  Ethernet 10Gb 562SFP+ Adapter
+               103c 22fc  Ethernet 10Gb 2-port 562FLR-SFP+ Adapter
+               103c 22fd  Ethernet 10Gb 2-port 562SFP+ Adapter
+               1137 0000  Ethernet Converged NIC X710-DA
+               1137 013b  Ethernet Converged NIC X710-DA4
+               1137 020a  Ethernet Converged NIC X710-DA2
+               17aa 0000  ThinkServer X710 AnyFabric for 10GbE SFP+
+               17aa 4001  ThinkServer X710-4 AnyFabric for 10GbE SFP+
+               17aa 4002  ThinkServer X710-2 AnyFabric for 10GbE SFP+
+               8086 0000  Ethernet Converged Network Adapter X710
+               8086 0001  Ethernet Converged Network Adapter X710-4
+               8086 0002  Ethernet Converged Network Adapter X710-4
+               8086 0004  Ethernet Converged Network Adapter X710-4
+               8086 0005  Ethernet Converged Network Adapter X710
+               8086 0006  Ethernet Converged Network Adapter X710
+               8086 0007  Ethernet Converged Network Adapter X710-2
+               8086 0008  Ethernet Converged Network Adapter X710-2
+               8086 0009  Ethernet Controller X710 for 10GbE SFP+
+               8086 000a  Ethernet Controller X710 for 10GbE SFP+
+               8086 000d  Ethernet Controller X710 for 10GbE SFP+
+               8086 4005  Ethernet Controller X710 for 10GbE SFP+
+               8086 4006  Ethernet Controller X710 for 10GbE SFP+
+       1574  Ethernet Controller XL710 Emulation
+       1580  Ethernet Controller XL710 for 40GbE backplane
+       1581  Ethernet Controller X710 for 10GbE backplane
+               1028 0000  Ethernet 10G X710-k bNDC
+               1028 1f98  Ethernet 10G 4P X710-k bNDC
+               1028 1f9e  Ethernet 10G 2P X710-k bNDC
+               8086 0000  Ethernet Converged Network Adapter XL710-Q2
+       1583  Ethernet Controller XL710 for 40GbE QSFP+
+               1028 0000  Ethernet 40G 2P XL710 QSFP+ rNDC
+               1028 1f9f  Ethernet 40G 2P XL710 QSFP+ rNDC
+               108e 0000  10Gb/40Gb Ethernet Adapter
+               108e 7b1d  10Gb/40Gb Ethernet Adapter
+               1137 0000  Ethernet Converged NIC XL710-QDA2
+               1137 013c  Ethernet Converged NIC XL710-QDA2
+               8086 0000  Ethernet Converged Network Adapter XL710-Q2
+               8086 0001  Ethernet Converged Network Adapter XL710-Q2
+               8086 0002  Ethernet Converged Network Adapter XL710-Q2
+               8086 0003  Ethernet I/O Module XL710-Q2
+               8086 0004  Ethernet Server Adapter XL710-Q2OCP
+               8086 0006  Ethernet Converged Network Adapter XL710-Q2
+       1584  Ethernet Controller XL710 for 40GbE QSFP+
+               8086 0000  Ethernet Converged Network Adapter XL710-Q1
+               8086 0001  Ethernet Converged Network Adapter XL710-Q1
+               8086 0002  Ethernet Converged Network Adapter XL710-Q1
+               8086 0003  Ethernet I/O Module XL710-Q1
+               8086 0004  Ethernet Server Adapter XL710-Q1OCP
+       1585  Ethernet Controller X710 for 10GbE QSFP+
+       1586  Ethernet Controller X710 for 10GBASE-T
+               108e 0000  Ethernet Controller X710 for 10GBASE-T
+               108e 4857  Ethernet Controller X710 for 10GBASE-T
+       1587  Ethernet Controller XL710 for 20GbE backplane
+               103c 0000  Flex-20 20Gb 2-port 660FLB Adapter
+               103c 22fe  Flex-20 20Gb 2-port 660FLB Adapter
+       1588  Ethernet Controller XL710 for 20GbE backplane
+               103c 0000  Flex-20 20Gb 2-port 660M Adapter
+               103c 22ff  Flex-20 20Gb 2-port 660M Adapter
+       1589  Ethernet Controller X710/X557-AT 10GBASE-T
+               108e 0000  Quad Port 10GBase-T Adapter
+               108e 7b1c  Quad Port 10GBase-T Adapter
+               8086 0000  Ethernet Converged Network Adapter X710-T
+               8086 0001  Ethernet Converged Network Adapter X710-T4
+               8086 0002  Ethernet Converged Network Adapter X710-T4
+       37ce  Ethernet Connection X722 for 10GbE backplane
+       37cf  Ethernet Connection X722 for 10GbE QSFP+
+       37d0  Ethernet Connection X722 for 10GbE SFP+
+       37d1  Ethernet Connection X722 for 1GbE
+       37d2  Ethernet Connection X722 for 10GBASE-T
+       37d3  Ethernet Connection X722 for 10GbE SFP+
+       37d4  Ethernet Connection X722 for 10GbE QSFP+
similarity index 97%
rename from i40e-dkms/i40e-1.3.47/scripts/dump_tables
rename to i40e-dkms/i40e-1.5.18/scripts/dump_tables
index 87d510a70c0fe1a8cef677ee181d0ea08caa10b5..9f160df496f6ca7694117ea75cae01f31660f8ed 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/bash
 # Dump Tables script
 #!/bin/bash
 # Dump Tables script
-# Copyright (C) 2015 Intel Corporation
+# Copyright (C) 2015 - 2016 Intel Corporation
 #
 # This script is used to generate a dump of the hardware state for
 # sending to linux.nics@intel.com for debugging purposes.  This
 #
 # This script is used to generate a dump of the hardware state for
 # sending to linux.nics@intel.com for debugging purposes.  This
similarity index 89%
rename from i40e-dkms/i40e-1.3.47/scripts/set_irq_affinity
rename to i40e-dkms/i40e-1.5.18/scripts/set_irq_affinity
index b3623574bf7d01193eaf46e799598d9e63f4f929..21413a02bd092f2620eb4758ae4e5d6cfb072c7c 100755 (executable)
@@ -36,8 +36,9 @@
 usage()
 {
        echo
 usage()
 {
        echo
-       echo "Usage: $0 [-x] {all|local|remote|one|custom} [ethX] <[ethY]>"
+       echo "Usage: $0 [-x|-X] {all|local|remote|one|custom} [ethX] <[ethY]>"
        echo "  options: -x             Configure XPS as well as smp_affinity"
        echo "  options: -x             Configure XPS as well as smp_affinity"
+       echo "  options: -X             Disable XPS but set smp_affinity"
        echo "  options: {remote|one} can be followed by a specific node number"
        echo "  Ex: $0 local eth0"
        echo "  Ex: $0 remote 1 eth0"
        echo "  options: {remote|one} can be followed by a specific node number"
        echo "  Ex: $0 local eth0"
        echo "  Ex: $0 remote 1 eth0"
@@ -47,11 +48,37 @@ usage()
        exit 1
 }
 
        exit 1
 }
 
+usageX()
+{
+       echo "options -x and -X cannot both be specified, pick one"
+       exit 1
+}
+
 if [ "$1" == "-x" ]; then
        XPS_ENA=1
        shift
 fi
 
 if [ "$1" == "-x" ]; then
        XPS_ENA=1
        shift
 fi
 
+if [ "$1" == "-X" ]; then
+       if [ -n "$XPS_ENA" ]; then
+               usageX
+       fi
+       XPS_DIS=2
+       shift
+fi
+
+if [ "$1" == -x ]; then
+       usageX
+fi
+
+if [ -n "$XPS_ENA" ] && [ -n "$XPS_DIS" ]; then
+       usageX
+fi
+
+if [ -z "$XPS_ENA" ]; then
+       XPS_ENA=$XPS_DIS
+fi
+
 num='^[0-9]+$'
 # Vars
 AFF=$1
 num='^[0-9]+$'
 # Vars
 AFF=$1
@@ -106,10 +133,18 @@ set_affinity()
 
        printf "%s" $MASK > /proc/irq/$IRQ/smp_affinity
        printf "%s %d %s -> /proc/irq/$IRQ/smp_affinity\n" $IFACE $core $MASK
 
        printf "%s" $MASK > /proc/irq/$IRQ/smp_affinity
        printf "%s %d %s -> /proc/irq/$IRQ/smp_affinity\n" $IFACE $core $MASK
-       if ! [ -z "$XPS_ENA" ]; then
+       case "$XPS_ENA" in
+       1)
                printf "%s %d %s -> /sys/class/net/%s/queues/tx-%d/xps_cpus\n" $IFACE $core $MASK $IFACE $((n-1))
                printf "%s" $MASK > /sys/class/net/$IFACE/queues/tx-$((n-1))/xps_cpus
                printf "%s %d %s -> /sys/class/net/%s/queues/tx-%d/xps_cpus\n" $IFACE $core $MASK $IFACE $((n-1))
                printf "%s" $MASK > /sys/class/net/$IFACE/queues/tx-$((n-1))/xps_cpus
-       fi
+       ;;
+       2)
+               MASK=0
+               printf "%s %d %s -> /sys/class/net/%s/queues/tx-%d/xps_cpus\n" $IFACE $core $MASK $IFACE $((n-1))
+               printf "%s" $MASK > /sys/class/net/$IFACE/queues/tx-$((n-1))/xps_cpus
+       ;;
+       *)
+       esac
 }
 
 # Allow usage of , or -
 }
 
 # Allow usage of , or -
diff --git a/i40e-dkms/i40e-1.5.18/src/Makefile b/i40e-dkms/i40e-1.5.18/src/Makefile
new file mode 100644 (file)
index 0000000..8c1483c
--- /dev/null
@@ -0,0 +1,180 @@
+################################################################################
+#
+# Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+# Copyright(c) 2013 - 2016 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+ifneq ($(KERNELRELEASE),)
+# kbuild part of makefile
+#
+# Makefile for the Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+#
+
+obj-$(CONFIG_I40E) += i40e.o
+
+i40e-y := i40e_main.o \
+       i40e_ethtool.o \
+       i40e_adminq.o \
+       i40e_common.o \
+       i40e_hmc.o \
+       i40e_lan_hmc.o \
+       i40e_nvm.o \
+       i40e_debugfs.o \
+       i40e_diag.o \
+       i40e_txrx.o \
+       i40e_ptp.o \
+       i40e_virtchnl_pf.o
+
+i40e-$(CONFIG_DCB) += i40e_dcb.o i40e_dcb_nl.o
+i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
+i40e-y += kcompat.o
+
+else   # ifneq($(KERNELRELEASE),)
+# normal makefile
+
+DRIVER := i40e
+
+ifeq (,$(wildcard common.mk))
+  $(error Cannot find common.mk build rules)
+else
+  include common.mk
+endif
+
+# i40e does not support building on kernels older than 2.6.32
+$(call minimum_kver_check,2,6,32)
+
+############################
+# Module Install Directory #
+############################
+
+# Default to using updates/drivers/net/ethernet/intel/ path, since depmod since
+# v3.1 defaults to checking updates folder first, and only checking kernels/
+# and extra afterwards. We use updates instead of kernel/* due to desire to
+# prevent over-writing built-in modules files.
+INSTALL_MOD_DIR ?= updates/drivers/net/ethernet/intel/${DRIVER}
+
+######################
+# Kernel Build Macro #
+######################
+
+# kernel build function
+# ${1} is the kernel build target
+# ${2] may contain any extra rules to pass directly to the sub-make process
+kernelbuild = ${MAKE} $(if ${GCC_I_SYS},CC:="${GCC_I_SYS}") \
+                      $(if ${EXTRA_CFLAGS},ccflags-y:="${EXTRA_CFLAGS}") \
+                      -C ${KSRC} \
+                      $(if ${KOBJ},O:=${KOBJ}) \
+                      CONFIG_${DRIVER_UPPERCASE}=m \
+                      M:=$(call readlink,.) \
+                      $(if ${INSTALL_MOD_PATH},INSTALL_MOD_PATH:=${INSTALL_MOD_PATH}) \
+                      INSTALL_MOD_DIR:=${INSTALL_MOD_DIR} \
+                      ${2} ${1};
+
+###############
+# Build rules #
+###############
+
+# Standard compilation, with regular output
+default:
+       @+$(call kernelbuild,modules)
+
+# Noisy output, for extra debugging
+noisy:
+       @+$(call kernelbuild,modules,V=1)
+
+# Silence any output generated
+silent:
+       @+$(call kernelbuild,modules,>/dev/null)
+
+# Enable higher warning level
+checkwarnings: clean
+       @+$(call kernelbuild,modules,W=1)
+
+# Run sparse static analyzer
+sparse: clean
+       @+$(call kernelbuild,modules,C=2 CF="-D__CHECK_ENDIAN__ -Wbitwise -Wcontext")
+
+# Run coccicheck static analyzer
+ccc: clean
+       @+$(call kernelbuild,modules,coccicheck MODE=report)
+
+# Build manfiles
+manfile:
+       @gzip -c ../${DRIVER}.${MANSECTION} > ${DRIVER}.${MANSECTION}.gz
+
+# Clean the module subdirectories
+clean:
+       @+$(call kernelbuild,clean)
+       @-rm -rf *.${MANSECTION}.gz *.ko
+
+# Install the modules and manpage
+install: default manfile
+       @echo "Copying manpages..."
+       @install -D -m 644 ${DRIVER}.${MANSECTION}.gz ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz
+       @echo "Installing modules..."
+       @$(call kernelbuild,modules_install)
+       @echo "Running depmod..."
+       @$(call cmd_depmod)
+ifeq (${cmd_initrd},)
+       @echo "Unable to update initrd. You may need to do this manually."
+else
+       @echo "Updating initrd..."
+       -@$(call cmd_initrd)
+endif
+
+uninstall:
+       rm -f ${INSTALL_MOD_PATH}/lib/modules/${KVER}/${INSTALL_MOD_DIR}/${DRIVER}.ko;
+       $(call cmd_depmod)
+       if [ -e ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ] ; then \
+               rm -f ${INSTALL_MOD_PATH}${MANDIR}/man${MANSECTION}/${DRIVER}.${MANSECTION}.gz ; \
+       fi;
+
+########
+# Help #
+########
+help:
+       @echo 'Cleaning targets:'
+       @echo '  clean               - Clean files generated by kernel module build'
+       @echo 'Build targets:'
+       @echo '  default             - Build module(s) with standard verbosity'
+       @echo '  noisy               - Build module(s) with V=1 verbosity -- very noisy'
+       @echo '  silent              - Build module(s), squelching all output'
+       @echo 'Static Analysis:'
+       @echo '  checkwarnings       - Clean, then build module(s) with W=1 warnings enabled'
+       @echo '  sparse              - Clean, then check module(s) using sparse'
+       @echo '  ccc                 - Clean, then check module(s) using coccicheck'
+       @echo 'Other targets:'
+       @echo '  manfile             - Generate a gzipped manpage'
+       @echo '  install             - Build then install the module(s) and manpage'
+       @echo '  uninstall           - Uninstall the module(s) and manpage'
+       @echo '  help                - Display this help message'
+       @echo 'Variables:'
+       @echo '  LINUX_VERSION       - Debug tool to force kernel LINUX_VERSION_CODE. Use at your own risk.'
+       @echo '  W=N                 - Kernel variable for setting warning levels'
+       @echo '  V=N                 - Kernel variable for setting output verbosity'
+       @echo '  INSTALL_MOD_PATH    - Add prefix for the module and manpage installation path'
+       @echo '  INSTALL_MOD_DIR     - Use module directory other than updates/drivers/net/ethernet/intel/${DRIVER}'
+       @echo '  KSRC                - Specifies the full path to the kernel tree to build against'
+       @echo ' Other variables may be available for tuning make process, see'
+       @echo ' Kernel Kbuild documentation for more information'
+
+.PHONY: default noisy clean manfile silent sparse ccc install uninstall help
+
+endif  # ifneq($(KERNELRELEASE),)
diff --git a/i40e-dkms/i40e-1.5.18/src/common.mk b/i40e-dkms/i40e-1.5.18/src/common.mk
new file mode 100644 (file)
index 0000000..8e129b5
--- /dev/null
@@ -0,0 +1,232 @@
+################################################################################
+#
+# Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+# Copyright(c) 2013 - 2016 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+# common Makefile rules useful for out-of-tree Linux driver builds
+
+#####################
+# Helpful functions #
+#####################
+
+readlink = $(shell readlink -f ${1})
+
+# helper functions for converting kernel version to version codes
+get_kver = $(or $(word ${2},$(subst ., ,${1})),0)
+get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \
+                       [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \
+                       [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \
+                       printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) )
+
+################
+# depmod Macro #
+################
+
+cmd_depmod = /sbin/depmod $(if ${SYSTEM_MAP_FILE},-e -F ${SYSTEM_MAP_FILE}) \
+                          $(if $(strip ${INSTALL_MOD_PATH}),-b ${INSTALL_MOD_PATH}) \
+                          -a ${KVER}
+
+################
+# initrd Macro #
+################
+
+cmd_initrd := $(shell \
+                if which dracut > /dev/null 2>&1 ; then \
+                    echo "dracut --force"; \
+                elif which mkinitrd > /dev/null 2>&1 ; then \
+                    echo "mkinitrd"; \
+                elif which update-initramfs > /dev/null 2>&1 ; then \
+                    echo "update-initramfs -u"; \
+                fi )
+
+#####################
+# Environment tests #
+#####################
+
+DRIVER_UPPERCASE := $(shell echo ${DRIVER} | tr "[:lower:]" "[:upper:]")
+
+ifeq (,${BUILD_KERNEL})
+BUILD_KERNEL=$(shell uname -r)
+endif
+
+# Kernel Search Path
+# All the places we look for kernel source
+KSP :=  /lib/modules/${BUILD_KERNEL}/build \
+        /lib/modules/${BUILD_KERNEL}/source \
+        /usr/src/linux-${BUILD_KERNEL} \
+        /usr/src/linux-$(${BUILD_KERNEL} | sed 's/-.*//') \
+        /usr/src/kernel-headers-${BUILD_KERNEL} \
+        /usr/src/kernel-source-${BUILD_KERNEL} \
+        /usr/src/linux-$(${BUILD_KERNEL} | sed 's/\([0-9]*\.[0-9]*\)\..*/\1/') \
+        /usr/src/linux \
+        /usr/src/kernels/${BUILD_KERNEL} \
+        /usr/src/kernels
+
+# prune the list down to only values that exist and have an include/linux
+# sub-directory. We can't use include/config because some older kernels don't
+# have this.
+test_dir = $(shell [ -e ${dir}/include/linux ] && echo ${dir})
+KSP := $(foreach dir, ${KSP}, ${test_dir})
+
+# we will use this first valid entry in the search path
+ifeq (,${KSRC})
+  KSRC := $(firstword ${KSP})
+endif
+
+ifeq (,${KSRC})
+  $(warning *** Kernel header files not in any of the expected locations.)
+  $(warning *** Install the appropriate kernel development package, e.g.)
+  $(error kernel-devel, for building kernel modules and try again)
+else
+ifeq (/lib/modules/${BUILD_KERNEL}/source, ${KSRC})
+  KOBJ :=  /lib/modules/${BUILD_KERNEL}/build
+else
+  KOBJ :=  ${KSRC}
+endif
+endif
+
+# Version file Search Path
+VSP :=  ${KOBJ}/include/generated/utsrelease.h \
+        ${KOBJ}/include/linux/utsrelease.h \
+        ${KOBJ}/include/linux/version.h \
+        ${KOBJ}/include/generated/uapi/linux/version.h \
+        /boot/vmlinuz.version.h
+
+# Config file Search Path
+CSP :=  ${KOBJ}/include/generated/autoconf.h \
+        ${KOBJ}/include/linux/autoconf.h \
+        /boot/vmlinuz.autoconf.h
+
+# System.map Search Path (for depmod)
+MSP := ${KSRC}/System.map \
+       /boot/System.map-${BUILD_KERNEL}
+
+# prune the lists down to only files that exist
+test_file = $(shell [ -f ${file} ] && echo ${file})
+VSP := $(foreach file, ${VSP}, ${test_file})
+CSP := $(foreach file, ${CSP}, ${test_file})
+MSP := $(foreach file, ${MSP}, ${test_file})
+
+
+# and use the first valid entry in the Search Paths
+ifeq (,${VERSION_FILE})
+  VERSION_FILE := $(firstword ${VSP})
+endif
+
+ifeq (,${CONFIG_FILE})
+  CONFIG_FILE := $(firstword ${CSP})
+endif
+
+ifeq (,${SYSTEM_MAP_FILE})
+  SYSTEM_MAP_FILE := $(firstword ${MSP})
+endif
+
+ifeq (,$(wildcard ${VERSION_FILE}))
+  $(error Linux kernel source not configured - missing version header file)
+endif
+
+ifeq (,$(wildcard ${CONFIG_FILE}))
+  $(error Linux kernel source not configured - missing autoconf.h)
+endif
+
+ifeq (,$(wildcard ${SYSTEM_MAP_FILE}))
+  $(warning Missing System.map file - depmod will not check for missing symbols)
+endif
+
+#######################
+# Linux Version Setup #
+#######################
+
+# The following command line parameter is intended for development of KCOMPAT
+# against upstream kernels such as net-next which have broken or non-updated
+# version codes in their Makefile. They are intended for debugging and
+# development purpose only so that we can easily test new KCOMPAT early. If you
+# don't know what this means, you do not need to set this flag. There is no
+# arcane magic here.
+
+# Convert LINUX_VERSION into LINUX_VERSION_CODE
+ifneq (${LINUX_VERSION},)
+  LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3))
+endif
+
+# Honor LINUX_VERSION_CODE
+ifneq (${LINUX_VERSION_CODE},)
+  $(warning Forcing target kernel to build with LINUX_VERSION_CODE of ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}). Do this at your own risk.)
+  KVER_CODE := ${LINUX_VERSION_CODE}
+  EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE}
+endif
+
+EXTRA_CFLAGS += ${CFLAGS_EXTRA}
+
+# get the kernel version - we use this to find the correct install path
+KVER := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VERSION_FILE} | grep UTS_RELEASE | \
+        awk '{ print $$3 }' | sed 's/\"//g')
+
+# assume source symlink is the same as build, otherwise adjust KOBJ
+ifneq (,$(wildcard /lib/modules/${KVER}/build))
+  ifneq (${KSRC},$(call readlink,/lib/modules/${KVER}/build))
+    KOBJ=/lib/modules/${KVER}/build
+  endif
+endif
+
+ifeq (${KVER_CODE},)
+  KVER_CODE := $(shell ${CC} ${EXTRA_CFLAGS} -E -dM ${VSP} 2> /dev/null |\
+                 grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g')
+endif
+
+# minimum_kver_check
+#
+# helper function to provide uniform output for different drivers to abort the
+# build based on kernel version check. Usage: "$(call minimum_kver_check,2,6,XX)".
+define _minimum_kver_check
+ifeq (0,$(shell [ ${KVER_CODE} -lt $(call get_kvercode,${1},${2},${3}) ]; echo "$$?"))
+  $$(warning *** Aborting the build.)
+  $$(error This driver is not supported on kernel versions older than ${1}.${2}.${3})
+endif
+endef
+minimum_kver_check = $(eval $(call _minimum_kver_check,${1},${2},${3}))
+
+################
+# Manual Pages #
+################
+
+MANSECTION = 7
+
+ifeq (,${MANDIR})
+  # find the best place to install the man page
+  MANPATH := $(shell (manpath 2>/dev/null || echo $MANPATH) | sed 's/:/ /g')
+  ifneq (,${MANPATH})
+    # test based on inclusion in MANPATH
+    test_dir = $(findstring ${dir}, ${MANPATH})
+  else
+    # no MANPATH, test based on directory existence
+    test_dir = $(shell [ -e ${dir} ] && echo ${dir})
+  endif
+  # our preferred install path
+  # should /usr/local/man be in here ?
+  MANDIR := /usr/share/man /usr/man
+  MANDIR := $(foreach dir, ${MANDIR}, ${test_dir})
+  MANDIR := $(firstword ${MANDIR})
+endif
+ifeq (,${MANDIR})
+  # fallback to /usr/man
+  MANDIR := /usr/man
+endif
similarity index 75%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e.h
rename to i40e-dkms/i40e-1.5.18/src/i40e.h
index 46fa83b851a4bc48b6782f7ea22578e0e1f579b1..7742633f9d5b973c04539e2571457b36f114c807 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -29,7 +26,6 @@
 
 #include <net/tcp.h>
 #include <net/udp.h>
 
 #include <net/tcp.h>
 #include <net/udp.h>
-#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/string.h>
 #include <linux/in.h>
 #include <linux/ip.h>
-#include <linux/tcp.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
-#ifdef NETIF_F_TSO
 #include <net/checksum.h>
 #include <net/checksum.h>
-#ifdef NETIF_F_TSO6
+#include <net/ipv6.h>
 #include <net/ip6_checksum.h>
 #include <net/ip6_checksum.h>
-#endif
-#endif
 #ifdef SIOCETHTOOL
 #include <linux/ethtool.h>
 #endif
 #ifdef SIOCETHTOOL
 #include <linux/ethtool.h>
 #endif
@@ -79,9 +71,6 @@
 #include "i40e_dcb.h"
 
 /* Useful i40e defaults */
 #include "i40e_dcb.h"
 
 /* Useful i40e defaults */
-#define I40E_BASE_PF_SEID     16
-#define I40E_BASE_VSI_SEID    512
-#define I40E_BASE_VEB_SEID    288
 #define I40E_MAX_VEB          16
 
 #define I40E_MAX_NUM_DESCRIPTORS      4096
 #define I40E_MAX_VEB          16
 
 #define I40E_MAX_NUM_DESCRIPTORS      4096
 #define I40E_MIN_MSIX                 2
 #define I40E_DEFAULT_NUM_VMDQ_VSI     8 /* max 256 VSIs */
 #define I40E_MIN_VSI_ALLOC            51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
 #define I40E_MIN_MSIX                 2
 #define I40E_DEFAULT_NUM_VMDQ_VSI     8 /* max 256 VSIs */
 #define I40E_MIN_VSI_ALLOC            51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
-#define i40e_default_queues_per_vmdq(pf)  1 /* max 16 qps */
+/* max 16 qps */
+#define i40e_default_queues_per_vmdq(pf) \
+               (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
 #define I40E_DEFAULT_QUEUES_PER_VF    4
 #define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */
 #define I40E_DEFAULT_QUEUES_PER_VF    4
 #define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */
-#define i40e_pf_get_max_q_per_tc(pf)  64 /* should be a power of 2 */
+#define i40e_pf_get_max_q_per_tc(pf) \
+               (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
 #define I40E_FDIR_RING                0
 #define I40E_FDIR_RING_COUNT          32
 #ifdef I40E_FCOE
 #define I40E_FDIR_RING                0
 #define I40E_FDIR_RING_COUNT          32
 #ifdef I40E_FCOE
 #define I40E_MAX_AQ_BUF_SIZE          4096
 #define I40E_AQ_LEN                   256
 #define I40E_AQ_WORK_LIMIT            66 /* max number of VFs + a little */
 #define I40E_MAX_AQ_BUF_SIZE          4096
 #define I40E_AQ_LEN                   256
 #define I40E_AQ_WORK_LIMIT            66 /* max number of VFs + a little */
+/*
+ * If I40E_MAX_USER_PRIORITY is updated please also update
+ * I40E_CLIENT_MAX_USER_PRIORITY in i40e_client.h and i40evf_client.h
+ */
 #define I40E_MAX_USER_PRIORITY        8
 #define I40E_DEFAULT_MSG_ENABLE       4
 #define I40E_QUEUE_WAIT_RETRY_LIMIT   10
 #define I40E_MAX_USER_PRIORITY        8
 #define I40E_DEFAULT_MSG_ENABLE       4
 #define I40E_QUEUE_WAIT_RETRY_LIMIT   10
 
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 /* Ethtool Private Flags */
 
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 /* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_MFP_FLAG       BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG  BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR         BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS      BIT(3)
+#define I40E_PRIV_FLAGS_MFP_FLAG               BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG          BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR                 BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS              BIT(3)
+#define I40E_PRIV_FLAGS_HW_ATR_EVICT           BIT(4)
+#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT   BIT(5)
 #endif
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #endif
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_OEM_VER_PATCH_MASK    0xff
 #define I40E_OEM_VER_BUILD_SHIFT   8
 #define I40E_OEM_VER_SHIFT         24
 #define I40E_OEM_VER_PATCH_MASK    0xff
 #define I40E_OEM_VER_BUILD_SHIFT   8
 #define I40E_OEM_VER_SHIFT         24
+#define I40E_PHY_DEBUG_PORT        BIT(4)
 
 /* The values in here are decimal coded as hex as is the case in the NVM map*/
 #define I40E_CURRENT_NVM_VERSION_HI 0x2
 
 /* The values in here are decimal coded as hex as is the case in the NVM map*/
 #define I40E_CURRENT_NVM_VERSION_HI 0x2
 #define XSTRINGIFY(bar) STRINGIFY(bar)
 
 #define I40E_RX_DESC(R, i)                     \
 #define XSTRINGIFY(bar) STRINGIFY(bar)
 
 #define I40E_RX_DESC(R, i)                     \
-       ((ring_is_16byte_desc_enabled(R))       \
-               ? (union i40e_32byte_rx_desc *) \
-                       (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
-               : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+       (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
 #define I40E_TX_DESC(R, i)                     \
        (&(((struct i40e_tx_desc *)((R)->desc))[i]))
 #define I40E_TX_CTXTDESC(R, i)                 \
 #define I40E_TX_DESC(R, i)                     \
        (&(((struct i40e_tx_desc *)((R)->desc))[i]))
 #define I40E_TX_CTXTDESC(R, i)                 \
 /* default to trying for four seconds */
 #define I40E_TRY_LINK_TIMEOUT (4 * HZ)
 
 /* default to trying for four seconds */
 #define I40E_TRY_LINK_TIMEOUT (4 * HZ)
 
+/**
+ * i40e_is_mac_710 - Return true if MAC is X710/XL710
+ * @hw: ptr to the hardware info
+ **/
+static inline bool i40e_is_mac_710(struct i40e_hw *hw)
+{
+       if ((hw->mac.type == I40E_MAC_X710) ||
+           (hw->mac.type == I40E_MAC_XL710))
+               return true;
+
+       return false;
+}
+
 /* driver state flags */
 enum i40e_state_t {
        __I40E_TESTING,
 /* driver state flags */
 enum i40e_state_t {
        __I40E_TESTING,
@@ -177,6 +186,7 @@ enum i40e_state_t {
        __I40E_FD_FLUSH_REQUESTED,
        __I40E_RESET_FAILED,
        __I40E_PORT_TX_SUSPENDED,
        __I40E_FD_FLUSH_REQUESTED,
        __I40E_RESET_FAILED,
        __I40E_PORT_TX_SUSPENDED,
+       __I40E_PTP_TX_IN_PROGRESS,
        __I40E_VF_DISABLE,
 };
 
        __I40E_VF_DISABLE,
 };
 
@@ -195,11 +205,17 @@ struct i40e_lump_tracking {
 
 #define I40E_DEFAULT_ATR_SAMPLE_RATE   20
 #define I40E_FDIR_MAX_RAW_PACKET_SIZE  512
 
 #define I40E_DEFAULT_ATR_SAMPLE_RATE   20
 #define I40E_FDIR_MAX_RAW_PACKET_SIZE  512
+#define I40E_MAX_PARSE_BYTE            480
+#define I40E_TCPIP_DUMMY_PACKET_LEN    54
+#define I40E_UDPIP_DUMMY_PACKET_LEN    42
+#define I40E_IP_DUMMY_PACKET_LEN       34
 #define I40E_FDIR_BUFFER_FULL_MARGIN   10
 #define I40E_FDIR_BUFFER_HEAD_ROOM     32
 #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
 
 #define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
 #define I40E_FDIR_BUFFER_FULL_MARGIN   10
 #define I40E_FDIR_BUFFER_HEAD_ROOM     32
 #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
 
 #define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
+#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
 
 enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
 
 enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
@@ -217,7 +233,7 @@ enum i40e_fd_stat_idx {
 
 struct i40e_fdir_filter {
        struct hlist_node fdir_node;
 
 struct i40e_fdir_filter {
        struct hlist_node fdir_node;
-       /* filter ipnut set */
+       /* filter input set */
        u8 flow_type;
        u8 ip4_proto;
        /* TX packet view of src and dst */
        u8 flow_type;
        u8 ip4_proto;
        /* TX packet view of src and dst */
@@ -226,6 +242,11 @@ struct i40e_fdir_filter {
        __be16 src_port;
        __be16 dst_port;
        __be32 sctp_v_tag;
        __be16 src_port;
        __be16 dst_port;
        __be32 sctp_v_tag;
+#define I40E_MAX_FLEX_FILTER           8
+       __be16 flex_bytes[I40E_MAX_FLEX_FILTER];
+       __be16 flex_mask[I40E_MAX_FLEX_FILTER];
+       u64 flex_mask_bit;
+
        /* filter control */
        u16 q_index;
        u8  flex_off;
        /* filter control */
        u16 q_index;
        u8  flex_off;
@@ -237,7 +258,11 @@ struct i40e_fdir_filter {
        u32 fd_id;
 };
 
        u32 fd_id;
 };
 
-#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+#ifndef FLOW_TYPE_MASK
+#define FLOW_TYPE_MASK  0xFF
+#else
+#error FLOW_TYPE_MASK already defined elsewhere
+#endif
 
 #define I40E_CLOUD_FIELD_OMAC  0x01
 #define I40E_CLOUD_FIELD_IMAC  0x02
 
 #define I40E_CLOUD_FIELD_OMAC  0x01
 #define I40E_CLOUD_FIELD_IMAC  0x02
@@ -268,22 +293,19 @@ struct i40e_cloud_filter {
        __be32 inner_ip[4];
        u32 tenant_id;
        u8 flags;
        __be32 inner_ip[4];
        u32 tenant_id;
        u8 flags;
-#define I40E_CLOUD_TNL_TYPE_XVLAN    1
+#define I40E_CLOUD_TNL_TYPE_NONE       0xff
        u8 tunnel_type;
        /* filter control */
        u8 tunnel_type;
        /* filter control */
-       u16 vsi_id; /* vsi number */
+       u16 seid;
        u16 queue_id;
        u32 id;
 };
 
        u16 queue_id;
        u32 id;
 };
 
-#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
-
 #define I40E_ETH_P_LLDP                        0x88cc
 
 #define I40E_DCB_PRIO_TYPE_STRICT      0
 #define I40E_DCB_PRIO_TYPE_ETS         1
 #define I40E_DCB_STRICT_PRIO_CREDITS   127
 #define I40E_ETH_P_LLDP                        0x88cc
 
 #define I40E_DCB_PRIO_TYPE_STRICT      0
 #define I40E_DCB_PRIO_TYPE_ETS         1
 #define I40E_DCB_STRICT_PRIO_CREDITS   127
-#define I40E_MAX_USER_PRIORITY 8
 /* DCB per TC information data structure */
 struct i40e_tc_info {
        u16     qoffset;        /* Queue offset from base queue */
 /* DCB per TC information data structure */
 struct i40e_tc_info {
        u16     qoffset;        /* Queue offset from base queue */
@@ -298,6 +320,35 @@ struct i40e_tc_configuration {
        struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
 };
 
        struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
 };
 
+struct i40e_udp_port_config {
+       __be16 index;
+       u8 type;
+};
+
+/* macros related to FLX_PIT */
+#define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \
+                                   I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+                                   I40E_PRTQF_FLX_PIT_FSIZE_MASK)
+#define I40E_FLEX_SET_DST_WORD(dst) (((dst) << \
+                                    I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+                                    I40E_PRTQF_FLX_PIT_DEST_OFF_MASK)
+#define I40E_FLEX_SET_SRC_WORD(src) (((src) << \
+                                    I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+                                    I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK)
+#define I40E_FLEX_PREP_VAL(dst, fsize, src) (I40E_FLEX_SET_DST_WORD(dst) | \
+                                            I40E_FLEX_SET_FSIZE(fsize) | \
+                                            I40E_FLEX_SET_SRC_WORD(src))
+
+#define I40E_FLEX_PIT_GET_SRC(flex) (((flex) & \
+                                    I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) >> \
+                                    I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_FLEX_PIT_GET_DST(flex) (((flex) & \
+                                    I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) >> \
+                                    I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_FLEX_PIT_GET_FSIZE(flex) (((flex) & \
+                                      I40E_PRTQF_FLX_PIT_FSIZE_MASK) >> \
+                                      I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+
 /* struct that defines the Ethernet device */
 struct i40e_pf {
        struct pci_dev *pdev;
 /* struct that defines the Ethernet device */
 struct i40e_pf {
        struct pci_dev *pdev;
@@ -310,7 +361,7 @@ struct i40e_pf {
        u16 num_vmdq_vsis;         /* num vmdq vsis this PF has set up */
        u16 num_vmdq_qps;          /* num queue pairs per vmdq pool */
        u16 num_vmdq_msix;         /* num queue vectors per vmdq pool */
        u16 num_vmdq_vsis;         /* num vmdq vsis this PF has set up */
        u16 num_vmdq_qps;          /* num queue pairs per vmdq pool */
        u16 num_vmdq_msix;         /* num queue vectors per vmdq pool */
-       u16 num_req_vfs;           /* num vfs requested for this VF */
+       u16 num_req_vfs;           /* num VFs requested for this VF */
        u16 num_vf_qps;            /* num queue pairs per VF */
 #ifdef I40E_FCOE
        u16 num_fcoe_qps;          /* num fcoe queues this PF has set up */
        u16 num_vf_qps;            /* num queue pairs per VF */
 #ifdef I40E_FCOE
        u16 num_fcoe_qps;          /* num fcoe queues this PF has set up */
@@ -319,7 +370,7 @@ struct i40e_pf {
        u16 num_lan_qps;           /* num lan queues this PF has set up */
        u16 num_lan_msix;          /* num queue vectors for the base PF vsi */
        int queues_left;           /* queues left unclaimed */
        u16 num_lan_qps;           /* num lan queues this PF has set up */
        u16 num_lan_msix;          /* num queue vectors for the base PF vsi */
        int queues_left;           /* queues left unclaimed */
-       u16 rss_size;              /* num queues in the RSS array */
+       u16 alloc_rss_size;        /* allocated RSS queues */
        u16 rss_size_max;          /* HW defined max RSS queues */
        u16 fdir_pf_filter_count;  /* num of guaranteed filters for this PF */
        u16 num_alloc_vsi;         /* num VSIs this driver supports */
        u16 rss_size_max;          /* HW defined max RSS queues */
        u16 fdir_pf_filter_count;  /* num of guaranteed filters for this PF */
        u16 num_alloc_vsi;         /* num VSIs this driver supports */
@@ -334,14 +385,48 @@ struct i40e_pf {
        u32 fd_atr_cnt;
        u32 fd_tcp_rule;
 
        u32 fd_atr_cnt;
        u32 fd_tcp_rule;
 
-       __be16  vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
-       u16 pending_vxlan_bitmap;
+       /* Book-keeping of side-band filter count per flow-type.
+        * This is used to detect and handle input set changes for
+        * respective flow-type.
+        */
+       u16 fd_tcp4_filter_cnt;
+       u16 fd_udp4_filter_cnt;
+       u16 fd_sctp4_filter_cnt;
+       u16 fd_ip4_filter_cnt;
+       u16 fd_flex_filter_cnt;
+
+/* Destination word in field vector for flexible payload */
+#define I40E_FLEX_DEST_L4              50
+#define I40E_FLEX_DEST_L3              53
+#define I40E_FLEX_DEST_UNUSED          63
+
+/* Flex PIT register index */
+#define I40E_FLEX_PIT_IDX_START_L4     6
+#define I40E_FLEX_PIT_IDX_START_L3     3
+#define I40E_FLEX_PIT_IDX_START_L2     0
+
+/* GLQF ORT index based on L2/L3/L4 type and values */
+#define I40E_L4_GLQF_ORT_IDX           35
+#define I40E_L3_GLQF_ORT_IDX           34
+#define I40E_L2_GLQF_ORT_IDX           33
+#define I40E_L4_GLQF_ORT_VAL           0x000000E6UL
+#define I40E_L3_GLQF_ORT_VAL           0x000000E3UL
+#define I40E_L2_GLQF_ORT_VAL           0x000000E0UL
+
+/* Max number of flexible payload based flow supported */
+#define I40E_MAX_FLEX_FLOW             8
+#define I40E_MAX_FLEX_PIT_REG          9
+#define I40E_MAX_SRC_WORD_OFFSET       32
+       u64 fd_tcp4_input_set;
+       u64 fd_udp4_input_set;
+       u64 fd_sctp4_input_set;
+       u64 fd_ip4_input_set;
+
+       struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+       u16 pending_udp_bitmap;
 
 
-#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
        struct hlist_head cloud_filter_list;
        u16 num_cloud_filters;
        struct hlist_head cloud_filter_list;
        u16 num_cloud_filters;
-#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
-
        enum i40e_interrupt_policy int_policy;
        u16 rx_itr_default;
        u16 tx_itr_default;
        enum i40e_interrupt_policy int_policy;
        u16 rx_itr_default;
        u16 tx_itr_default;
@@ -354,39 +439,51 @@ struct i40e_pf {
        struct work_struct service_task;
 
        u64 flags;
        struct work_struct service_task;
 
        u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED              BIT_ULL(1)
-#define I40E_FLAG_MSI_ENABLED                  BIT_ULL(2)
-#define I40E_FLAG_MSIX_ENABLED                 BIT_ULL(3)
-#define I40E_FLAG_RX_1BUF_ENABLED              BIT_ULL(4)
-#define I40E_FLAG_RX_PS_ENABLED                BIT_ULL(5)
-#define I40E_FLAG_RSS_ENABLED                  BIT_ULL(6)
-#define I40E_FLAG_VMDQ_ENABLED                 BIT_ULL(7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT         BIT_ULL(8)
-#define I40E_FLAG_NEED_LINK_UPDATE             BIT_ULL(9)
+#define I40E_FLAG_RX_CSUM_ENABLED              BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED                  BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED                 BIT_ULL(3)
+#define I40E_FLAG_RSS_ENABLED                  BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED                 BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT         BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE             BIT_ULL(9)
 #ifdef I40E_FCOE
 #ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED                 BIT_ULL(11)
+#define I40E_FLAG_FCOE_ENABLED                 BIT_ULL(11)
 #endif /* I40E_FCOE */
 #endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL                   BIT_ULL(12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       BIT_ULL(13)
-#define I40E_FLAG_CLEAN_ADMINQ                 BIT_ULL(14)
-#define I40E_FLAG_FILTER_SYNC                  BIT_ULL(15)
-#define I40E_FLAG_PROCESS_MDD_EVENT            BIT_ULL(17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT           BIT_ULL(18)
-#define I40E_FLAG_SRIOV_ENABLED                BIT_ULL(19)
-#define I40E_FLAG_DCB_ENABLED                  BIT_ULL(20)
-#define I40E_FLAG_FD_SB_ENABLED                BIT_ULL(21)
-#define I40E_FLAG_FD_ATR_ENABLED               BIT_ULL(22)
+#define I40E_FLAG_IN_NETPOLL                   BIT_ULL(12)
+#define I40E_FLAG_CLEAN_ADMINQ                 BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC                  BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT            BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT           BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED                        BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED                  BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED                        BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED               BIT_ULL(22)
 #ifdef HAVE_PTP_1588_CLOCK
 #ifdef HAVE_PTP_1588_CLOCK
-#define I40E_FLAG_PTP                          BIT_ULL(25)
+#define I40E_FLAG_PTP                          BIT_ULL(25)
 #endif /* HAVE_PTP_1588_CLOCK */
 #endif /* HAVE_PTP_1588_CLOCK */
-#define I40E_FLAG_MFP_ENABLED                  BIT_ULL(26)
-#define I40E_FLAG_VXLAN_FILTER_SYNC            BIT_ULL(27)
-#define I40E_FLAG_PORT_ID_VALID                BIT_ULL(28)
-#define I40E_FLAG_DCB_CAPABLE                  BIT_ULL(29)
-#define I40E_FLAG_VEB_STATS_ENABLED            BIT_ULL(37)
-#define I40E_FLAG_LINK_POLLING_ENABLED         BIT_ULL(39)
-#define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
-#define I40E_FLAG_NO_PCI_LINK_CHECK            BIT_ULL(41)
+#define I40E_FLAG_MFP_ENABLED                  BIT_ULL(26)
+#define I40E_FLAG_UDP_FILTER_SYNC              BIT_ULL(27)
+#define I40E_FLAG_PORT_ID_VALID                        BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE                  BIT_ULL(29)
+#define I40E_FLAG_RSS_AQ_CAPABLE               BIT_ULL(31)
+#define I40E_FLAG_HW_ATR_EVICT_CAPABLE         BIT_ULL(32)
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE       BIT_ULL(33)
+#define I40E_FLAG_128_QP_RSS_CAPABLE           BIT_ULL(34)
+#define I40E_FLAG_WB_ON_ITR_CAPABLE            BIT_ULL(35)
+#define I40E_FLAG_VEB_STATS_ENABLED            BIT_ULL(37)
+#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE  BIT_ULL(38)
+#define I40E_FLAG_LINK_POLLING_ENABLED         BIT_ULL(39)
+#define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
+#define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE       BIT_ULL(41)
+#define I40E_FLAG_NO_PCI_LINK_CHECK            BIT_ULL(42)
+#define I40E_FLAG_100M_SGMII_CAPABLE           BIT_ULL(43)
+#define I40E_FLAG_RESTART_AUTONEG              BIT_ULL(44)
+#define I40E_FLAG_NO_DCB_SUPPORT               BIT_ULL(45)
+#define I40E_FLAG_USE_SET_LLDP_MIB             BIT_ULL(46)
+#define I40E_FLAG_STOP_FW_LLDP                 BIT_ULL(47)
+#define I40E_FLAG_HAVE_10GBASET_PHY            BIT_ULL(48)
+#define I40E_FLAG_MPLS_HDR_OFFLOAD_CAPABLE     BIT_ULL(49)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT         BIT_ULL(50)
 
        /* tracks features that get auto disabled by errors */
        u64 auto_disable_flags;
 
        /* tracks features that get auto disabled by errors */
        u64 auto_disable_flags;
@@ -438,6 +535,7 @@ struct i40e_pf {
        struct i40e_vf *vf;
        int num_alloc_vfs;      /* actual number of VFs allocated */
        u32 vf_aq_requests;
        struct i40e_vf *vf;
        int num_alloc_vfs;      /* actual number of VFs allocated */
        u32 vf_aq_requests;
+       u32 arq_overflows;      /* Not fatal, possibly indicative of problems */
 
        /* DCBx/DCBNL capability for PF that indicates
         * whether DCBx is managed by firmware or host
 
        /* DCBx/DCBNL capability for PF that indicates
         * whether DCBx is managed by firmware or host
@@ -479,12 +577,13 @@ struct i40e_pf {
        u64 rx_sctp_cso_err;
        u64 rx_ip4_cso_err;
 #endif
        u64 rx_sctp_cso_err;
        u64 rx_ip4_cso_err;
 #endif
-       u16 rss_table_size;
+       u16 rss_table_size; /* HW RSS table size */
        u32 max_bw;
        u32 min_bw;
 
        u32 ioremap_len;
        u32 fd_inv;
        u32 max_bw;
        u32 min_bw;
 
        u32 ioremap_len;
        u32 fd_inv;
+       u16 phy_led_val;
 };
 
 struct i40e_mac_filter {
 };
 
 struct i40e_mac_filter {
@@ -562,6 +661,8 @@ struct i40e_vsi {
        u32 tx_restart;
        u32 tx_busy;
        u64 tx_linearize;
        u32 tx_restart;
        u32 tx_busy;
        u64 tx_linearize;
+       u64 tx_force_wb;
+       u64 tx_lost_interrupt;
        u32 rx_buf_failed;
        u32 rx_page_failed;
 
        u32 rx_buf_failed;
        u32 rx_page_failed;
 
@@ -579,13 +680,13 @@ struct i40e_vsi {
        u16 tx_itr_setting;
        u16 int_rate_limit;  /* value in usecs */
 
        u16 tx_itr_setting;
        u16 int_rate_limit;  /* value in usecs */
 
-       u16 rss_table_size;
-       u16 rss_size;
+       u16 rss_table_size; /* HW RSS table size */
+       u16 rss_size;       /* Allocated RSS queues */
+       u8  *rss_hkey_user; /* User configured hash keys */
+       u8  *rss_lut_user;  /* User configured lookup table entries */
 
        u16 max_frame;
 
        u16 max_frame;
-       u16 rx_hdr_len;
        u16 rx_buf_len;
        u16 rx_buf_len;
-       u8  dtype;
 
        /* List of q_vectors allocated to this VSI */
        struct i40e_q_vector **q_vectors;
 
        /* List of q_vectors allocated to this VSI */
        struct i40e_q_vector **q_vectors;
@@ -603,7 +704,7 @@ struct i40e_vsi {
        u16 num_queue_pairs; /* Used tx and rx pairs */
        u16 num_desc;
        enum i40e_vsi_type type;  /* VSI type, e.g., LAN, FCoE, etc */
        u16 num_queue_pairs; /* Used tx and rx pairs */
        u16 num_desc;
        enum i40e_vsi_type type;  /* VSI type, e.g., LAN, FCoE, etc */
-       u16 vf_id;              /* Virtual function ID for SRIOV VSIs */
+       s16 vf_id;              /* Virtual function ID for SRIOV VSIs */
 
        struct i40e_tc_configuration tc_config;
        struct i40e_aqc_vsi_properties_data info;
 
        struct i40e_tc_configuration tc_config;
        struct i40e_aqc_vsi_properties_data info;
@@ -629,9 +730,6 @@ struct i40e_vsi {
        /* VSI specific handlers */
        irqreturn_t (*irq_handler)(int irq, void *data);
 #ifdef ETHTOOL_GRXRINGS
        /* VSI specific handlers */
        irqreturn_t (*irq_handler)(int irq, void *data);
 #ifdef ETHTOOL_GRXRINGS
-
-       /* current rxnfc data */
-       struct ethtool_rxnfc rxnfc; /* current rss hash opts */
 #endif
 } ____cacheline_internodealigned_in_smp;
 
 #endif
 } ____cacheline_internodealigned_in_smp;
 
@@ -658,6 +756,7 @@ struct i40e_q_vector {
 #endif
        struct rcu_head rcu;    /* to avoid race with update stats on free */
        char name[I40E_INT_NAME_STR_LEN];
 #endif
        struct rcu_head rcu;    /* to avoid race with update stats on free */
        char name[I40E_INT_NAME_STR_LEN];
+       bool arm_wb_state;
 #define ITR_COUNTDOWN_START 100
        u8 itr_countdown;       /* when 0 should adjust ITR */
 } ____cacheline_internodealigned_in_smp;
 #define ITR_COUNTDOWN_START 100
        u8 itr_countdown;       /* when 0 should adjust ITR */
 } ____cacheline_internodealigned_in_smp;
@@ -681,8 +780,8 @@ static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
 
        full_ver = hw->nvm.oem_ver;
        ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
 
        full_ver = hw->nvm.oem_ver;
        ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
-       build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT)
-                I40E_OEM_VER_BUILD_MASK);
+       build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
+                I40E_OEM_VER_BUILD_MASK);
        patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
 
        snprintf(buf, sizeof(buf),
        patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
 
        snprintf(buf, sizeof(buf),
@@ -740,6 +839,43 @@ static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
        return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
 }
 
        return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
 }
 
+/**
+ * i40e_read_fd_input_set - reads value of flow director input set register
+ * @pf: pointer to the PF struct
+ * @addr: register addr
+ *
+ * This function reads value of flow director input set register
+ * specified by 'addr' (which is specific to flow-type)
+ **/
+static inline u64 i40e_read_fd_input_set(struct i40e_pf *pf, u16 addr)
+{
+       u64 val = 0;
+
+       val = (u64)i40e_read_rx_ctl(&pf->hw,
+                                  I40E_PRTQF_FD_INSET(addr, 1)) << 32;
+       val |= (u64)i40e_read_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0));
+
+       return val;
+}
+
+/**
+ * i40e_write_fd_input_set - writes value into flow director input set register
+ * @pf: pointer to the PF struct
+ * @addr: register addr
+ * @val: value to be written
+ *
+ * This function writes specified value to the register specified by 'addr'.
+ * This register is input set register based on flow-type.
+ **/
+static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
+                                          u16 addr, u64 val)
+{
+       i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 1),
+                         (u32)(val >> 32));
+       i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FD_INSET(addr, 0),
+                         (u32)val);
+}
+
 /* needed by i40e_ethtool.c */
 int i40e_up(struct i40e_vsi *vsi);
 void i40e_down(struct i40e_vsi *vsi);
 /* needed by i40e_ethtool.c */
 int i40e_up(struct i40e_vsi *vsi);
 void i40e_down(struct i40e_vsi *vsi);
@@ -747,6 +883,8 @@ extern char i40e_driver_name[];
 extern const char i40e_driver_version_str[];
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
 extern const char i40e_driver_version_str[];
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
 void i40e_update_stats(struct i40e_vsi *vsi);
 void i40e_update_eth_stats(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
 void i40e_update_stats(struct i40e_vsi *vsi);
 void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -762,6 +900,19 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
                             struct i40e_pf *pf, bool add);
 int i40e_add_del_fdir(struct i40e_vsi *vsi,
                      struct i40e_fdir_filter *input, bool add);
                             struct i40e_pf *pf, bool add);
 int i40e_add_del_fdir(struct i40e_vsi *vsi,
                      struct i40e_fdir_filter *input, bool add);
+
+/**
+ * i40e_is_flex_filter - returns true if input filter is flex filter
+ * @input: pointer to fdir filter
+ *
+ * This function determines based on user input (user-def N m )
+ * if it can be classified as flex filter or not.
+ **/
+static inline bool i40e_is_flex_filter(struct i40e_fdir_filter *input)
+{
+       return (input && (!input->flex_bytes[2]) && input->flex_bytes[3] &&
+               (input->flex_mask[3] != cpu_to_be16(~0))) ? true : false;
+}
 void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
 u32 i40e_get_current_fd_count(struct i40e_pf *pf);
 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
 void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
 u32 i40e_get_current_fd_count(struct i40e_pf *pf);
 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
@@ -777,7 +928,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        bool is_vf, bool is_netdev);
 void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
                     bool is_vf, bool is_netdev);
                                        bool is_vf, bool is_netdev);
 void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
                     bool is_vf, bool is_netdev);
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                                u16 uplink, u32 param1);
 int i40e_vsi_release(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                                u16 uplink, u32 param1);
 int i40e_vsi_release(struct i40e_vsi *vsi);
@@ -804,14 +955,11 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
 void i40e_veb_release(struct i40e_veb *veb);
 
 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
 void i40e_veb_release(struct i40e_veb *veb);
 
 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
-i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
+int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
-#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
 int i40e_add_del_cloud_filter(struct i40e_pf *pf,
 int i40e_add_del_cloud_filter(struct i40e_pf *pf,
-                                        struct i40e_cloud_filter *filter,
-                                        struct i40e_vsi *vsi,
-                                        bool add);
-#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+                             struct i40e_cloud_filter *filter,
+                             bool add);
 void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
 void i40e_pf_reset_stats(struct i40e_pf *pf);
 #ifdef CONFIG_DEBUG_FS
 void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
 void i40e_pf_reset_stats(struct i40e_pf *pf);
 #ifdef CONFIG_DEBUG_FS
@@ -836,6 +984,9 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
        struct i40e_hw *hw = &pf->hw;
        u32 val;
 
        struct i40e_hw *hw = &pf->hw;
        u32 val;
 
+       /* definitely clear the PBA here, as this function is meant to
+        * clean out all previous interrupts AND enable the interrupt
+        */
        val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
              I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
              (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
        val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
              I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
              (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
@@ -843,9 +994,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
        /* skip the flush */
 }
 
        /* skip the flush */
 }
 
-void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
 #ifdef I40E_FCOE
 #ifdef HAVE_NDO_GET_STATS64
 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
 #ifdef I40E_FCOE
 #ifdef HAVE_NDO_GET_STATS64
 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
@@ -877,18 +1027,25 @@ void i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 #endif
 #endif
 int i40e_open(struct net_device *netdev);
 #endif
 #endif
 int i40e_open(struct net_device *netdev);
+int i40e_close(struct net_device *netdev);
 int i40e_vsi_open(struct i40e_vsi *vsi);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
                                             bool is_vf, bool is_netdev);
 int i40e_vsi_open(struct i40e_vsi *vsi);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
                                             bool is_vf, bool is_netdev);
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+                         bool is_vf, bool is_netdev);
 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
                                      bool is_vf, bool is_netdev);
 #ifdef I40E_FCOE
 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
                                      bool is_vf, bool is_netdev);
 #ifdef I40E_FCOE
-int i40e_close(struct net_device *netdev);
+#ifdef NETIF_F_HW_TC
+int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+                   struct tc_to_netdev *tc);
+#else
 int i40e_setup_tc(struct net_device *netdev, u8 tc);
 int i40e_setup_tc(struct net_device *netdev, u8 tc);
+#endif
 void i40e_netpoll(struct net_device *netdev);
 int i40e_fcoe_enable(struct net_device *netdev);
 int i40e_fcoe_disable(struct net_device *netdev);
 void i40e_netpoll(struct net_device *netdev);
 int i40e_fcoe_enable(struct net_device *netdev);
 int i40e_fcoe_disable(struct net_device *netdev);
@@ -930,10 +1087,6 @@ void i40e_ptp_stop(struct i40e_pf *pf);
 #endif /* HAVE_PTP_1588_CLOCK */
 u8 i40e_pf_get_num_tc(struct i40e_pf *pf);
 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
 #endif /* HAVE_PTP_1588_CLOCK */
 u8 i40e_pf_get_num_tc(struct i40e_pf *pf);
 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
-#if IS_ENABLED(CONFIG_CONFIGFS_FS)
-int i40e_configfs_init(void);
-void i40e_configfs_exit(void);
-#endif /* CONFIG_CONFIGFS_FS */
 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
similarity index 95%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_adminq.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_adminq.c
index 58e7ed50e445828bd5c5994bcad52abb60dc567f..1c2830c7660edce5652b3ba963f1920fe6cecb70 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
 
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
 
-/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
-       return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
-               desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
-}
-
 /**
  *  i40e_adminq_init_regs - Initialize AdminQ registers
  *  @hw: pointer to the hardware structure
 /**
  *  i40e_adminq_init_regs - Initialize AdminQ registers
  *  @hw: pointer to the hardware structure
@@ -384,7 +371,6 @@ i40e_status i40e_init_asq(struct i40e_hw *hw)
 
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
 
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
-       hw->aq.asq.count = hw->aq.num_asq_entries;
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -402,6 +388,7 @@ i40e_status i40e_init_asq(struct i40e_hw *hw)
                goto init_adminq_free_rings;
 
        /* success! */
                goto init_adminq_free_rings;
 
        /* success! */
+       hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;
 
 init_adminq_free_rings:
        goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -443,7 +430,6 @@ i40e_status i40e_init_arq(struct i40e_hw *hw)
 
        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;
 
        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;
-       hw->aq.arq.count = hw->aq.num_arq_entries;
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
 
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -461,6 +447,7 @@ i40e_status i40e_init_arq(struct i40e_hw *hw)
                goto init_adminq_free_rings;
 
        /* success! */
                goto init_adminq_free_rings;
 
        /* success! */
+       hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;
 
 init_adminq_free_rings:
        goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -538,6 +525,24 @@ shutdown_arq_out:
        return ret_code;
 }
 
        return ret_code;
 }
 
+/**
+ *  i40e_resume_aq - resume AQ processing from 0
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_resume_aq(struct i40e_hw *hw)
+{
+       /* Registers are reset after PF reset */
+       hw->aq.asq.next_to_use = 0;
+       hw->aq.asq.next_to_clean = 0;
+
+       i40e_config_asq_regs(hw);
+
+       hw->aq.arq.next_to_use = 0;
+       hw->aq.arq.next_to_clean = 0;
+
+       i40e_config_arq_regs(hw);
+}
+
 /**
  *  i40e_init_adminq - main initialization routine for Admin Queue
  *  @hw: pointer to the hardware structure
 /**
  *  i40e_init_adminq - main initialization routine for Admin Queue
  *  @hw: pointer to the hardware structure
@@ -551,10 +556,11 @@ shutdown_arq_out:
  **/
 i40e_status i40e_init_adminq(struct i40e_hw *hw)
 {
  **/
 i40e_status i40e_init_adminq(struct i40e_hw *hw)
 {
-       i40e_status ret_code;
-       u16 eetrack_lo, eetrack_hi;
        u16 cfg_ptr, oem_hi, oem_lo;
        u16 cfg_ptr, oem_hi, oem_lo;
+       u16 eetrack_lo, eetrack_hi;
+       i40e_status ret_code;
        int retry = 0;
        int retry = 0;
+
        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
@@ -563,8 +569,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }
-
-       /* initialize spin locks */
        i40e_init_spinlock(&hw->aq.asq_spinlock);
        i40e_init_spinlock(&hw->aq.arq_spinlock);
 
        i40e_init_spinlock(&hw->aq.asq_spinlock);
        i40e_init_spinlock(&hw->aq.arq_spinlock);
 
@@ -625,13 +629,9 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 
        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
 
        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
-       hw->aq.nvm_release_on_done = false;
+       hw->nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 
-       ret_code = i40e_aq_set_hmc_resource_profile(hw,
-                                                   I40E_HMC_PROFILE_DEFAULT,
-                                                   0,
-                                                   NULL);
        ret_code = I40E_SUCCESS;
 
        /* success! */
        ret_code = I40E_SUCCESS;
 
        /* success! */
@@ -662,8 +662,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
 
        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);
 
        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);
-
-       /* destroy the spinlocks */
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);
 
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);
 
@@ -689,7 +687,6 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
 
        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
 
        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
-
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
@@ -722,7 +719,7 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
  *  Returns true if the firmware has processed all descriptors on the
  *  admin send queue. Returns false if there are still requests pending.
  **/
  *  Returns true if the firmware has processed all descriptors on the
  *  admin send queue. Returns false if there are still requests pending.
  **/
-bool i40e_asq_done(struct i40e_hw *hw)
+static bool i40e_asq_done(struct i40e_hw *hw)
 {
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
 {
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
@@ -880,7 +877,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                         */
                        if (i40e_asq_done(hw))
                                break;
                         */
                        if (i40e_asq_done(hw))
                                break;
-                       /* ugh! delay while spin_lock */
                        usleep_range(1000, 2000);
                        total_delay++;
                } while (total_delay < hw->aq.asq_cmd_timeout);
                        usleep_range(1000, 2000);
                        total_delay++;
                } while (total_delay < hw->aq.asq_cmd_timeout);
@@ -974,9 +970,19 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
        u16 flags;
        u16 ntu;
 
        u16 flags;
        u16 ntu;
 
+       /* pre-clean the event info */
+       i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
+
        /* take the lock before we start messing with the ring */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);
 
        /* take the lock before we start messing with the ring */
        i40e_acquire_spinlock(&hw->aq.arq_spinlock);
 
+       if (hw->aq.arq.count == 0) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQRX: Admin queue not initialized.\n");
+               ret_code = I40E_ERR_QUEUE_EMPTY;
+               goto clean_arq_element_err;
+       }
+
        /* set next_to_use to head */
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
        if (ntu == ntc) {
        /* set next_to_use to head */
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
        if (ntu == ntc) {
@@ -1036,45 +1042,14 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;
 
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;
 
+       i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
 clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
 clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
        i40e_release_spinlock(&hw->aq.arq_spinlock);
 
        i40e_release_spinlock(&hw->aq.arq_spinlock);
 
-       if (i40e_is_nvm_update_op(&e->desc)) {
-               if (hw->aq.nvm_release_on_done) {
-                       i40e_release_nvm(hw);
-                       hw->aq.nvm_release_on_done = false;
-               }
-
-               switch (hw->nvmupd_state) {
-               case I40E_NVMUPD_STATE_INIT_WAIT:
-                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-                       break;
-
-               case I40E_NVMUPD_STATE_WRITE_WAIT:
-                       hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
-                       break;
-
-               default:
-                       break;
-               }
-       }
-
        return ret_code;
 }
 
        return ret_code;
 }
 
-void i40e_resume_aq(struct i40e_hw *hw)
-{
-       /* Registers are reset after PF reset */
-       hw->aq.asq.next_to_use = 0;
-       hw->aq.asq.next_to_clean = 0;
-
-       i40e_config_asq_regs(hw);
-
-       hw->aq.arq.next_to_use = 0;
-       hw->aq.arq.next_to_clean = 0;
-
-       i40e_config_arq_regs(hw);
-}
similarity index 93%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_adminq.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_adminq.h
index 3a80aa0168ff51e906029ee98de36a787a0d08a0..b71b066f384fc221a1352f5293c9670e3e1862ed 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -97,7 +94,6 @@ struct i40e_adminq_info {
        u32 fw_build;                   /* firmware build number */
        u16 api_maj_ver;                /* api major version */
        u16 api_min_ver;                /* api minor version */
        u32 fw_build;                   /* firmware build number */
        u16 api_maj_ver;                /* api major version */
        u16 api_min_ver;                /* api minor version */
-       bool nvm_release_on_done;
 
        struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
        struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
 
        struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
        struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
@@ -151,8 +147,8 @@ static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
 }
 
 /* general information */
 }
 
 /* general information */
-#define I40E_AQ_LARGE_BUF              512
-#define I40E_ASQ_CMD_TIMEOUT           250  /* msecs */
+#define I40E_AQ_LARGE_BUF      512
+#define I40E_ASQ_CMD_TIMEOUT   250  /* msecs */
 
 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode);
 
 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode);
similarity index 91%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_adminq_cmd.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_adminq_cmd.h
index aa4a8b2f057157cad8923022f13d333889482f1c..ecaa757e00843428be87b07709e1151505d65bc4 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -34,7 +31,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR      0x0001
  */
 
 #define I40E_FW_API_VERSION_MAJOR      0x0001
-#define I40E_FW_API_VERSION_MINOR      0x0004
+#define I40E_FW_API_VERSION_MINOR      0x0005
 
 struct i40e_aq_desc {
        __le16 flags;
 
 struct i40e_aq_desc {
        __le16 flags;
@@ -132,6 +129,10 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_list_func_capabilities     = 0x000A,
        i40e_aqc_opc_list_dev_capabilities      = 0x000B,
 
        i40e_aqc_opc_list_func_capabilities     = 0x000A,
        i40e_aqc_opc_list_dev_capabilities      = 0x000B,
 
+       /* Proxy commands */
+       i40e_aqc_opc_set_proxy_config           = 0x0104,
+       i40e_aqc_opc_set_ns_proxy_table_entry   = 0x0105,
+
        /* LAA */
        i40e_aqc_opc_mac_address_read   = 0x0107,
        i40e_aqc_opc_mac_address_write  = 0x0108,
        /* LAA */
        i40e_aqc_opc_mac_address_read   = 0x0107,
        i40e_aqc_opc_mac_address_write  = 0x0108,
@@ -139,12 +140,19 @@ enum i40e_admin_queue_opc {
        /* PXE */
        i40e_aqc_opc_clear_pxe_mode     = 0x0110,
 
        /* PXE */
        i40e_aqc_opc_clear_pxe_mode     = 0x0110,
 
+       /* WoL commands */
+       i40e_aqc_opc_set_wol_filter     = 0x0120,
+       i40e_aqc_opc_get_wake_reason    = 0x0121,
+
        /* internal switch commands */
        i40e_aqc_opc_get_switch_config          = 0x0200,
        i40e_aqc_opc_add_statistics             = 0x0201,
        i40e_aqc_opc_remove_statistics          = 0x0202,
        i40e_aqc_opc_set_port_parameters        = 0x0203,
        i40e_aqc_opc_get_switch_resource_alloc  = 0x0204,
        /* internal switch commands */
        i40e_aqc_opc_get_switch_config          = 0x0200,
        i40e_aqc_opc_add_statistics             = 0x0201,
        i40e_aqc_opc_remove_statistics          = 0x0202,
        i40e_aqc_opc_set_port_parameters        = 0x0203,
        i40e_aqc_opc_get_switch_resource_alloc  = 0x0204,
+       i40e_aqc_opc_set_switch_config          = 0x0205,
+       i40e_aqc_opc_rx_ctl_reg_read            = 0x0206,
+       i40e_aqc_opc_rx_ctl_reg_write           = 0x0207,
 
        i40e_aqc_opc_add_vsi                    = 0x0210,
        i40e_aqc_opc_update_vsi_parameters      = 0x0211,
 
        i40e_aqc_opc_add_vsi                    = 0x0210,
        i40e_aqc_opc_update_vsi_parameters      = 0x0211,
@@ -202,10 +210,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_resume_port_tx                             = 0x041C,
        i40e_aqc_opc_configure_partition_bw                     = 0x041D,
 
        i40e_aqc_opc_resume_port_tx                             = 0x041C,
        i40e_aqc_opc_configure_partition_bw                     = 0x041D,
 
-       /* hmc */
-       i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
-       i40e_aqc_opc_set_hmc_resource_profile   = 0x0501,
-
        /* phy commands*/
        i40e_aqc_opc_get_phy_abilities          = 0x0600,
        i40e_aqc_opc_set_phy_config             = 0x0601,
        /* phy commands*/
        i40e_aqc_opc_get_phy_abilities          = 0x0600,
        i40e_aqc_opc_set_phy_config             = 0x0601,
@@ -220,6 +224,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_get_phy_wol_caps           = 0x0621,
        i40e_aqc_opc_set_phy_debug              = 0x0622,
        i40e_aqc_opc_upload_ext_phy_fm          = 0x0625,
        i40e_aqc_opc_get_phy_wol_caps           = 0x0621,
        i40e_aqc_opc_set_phy_debug              = 0x0622,
        i40e_aqc_opc_upload_ext_phy_fm          = 0x0625,
+       i40e_aqc_opc_run_phy_activity           = 0x0626,
 
        /* NVM commands */
        i40e_aqc_opc_nvm_read                   = 0x0701,
 
        /* NVM commands */
        i40e_aqc_opc_nvm_read                   = 0x0701,
@@ -228,6 +233,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_nvm_config_read            = 0x0704,
        i40e_aqc_opc_nvm_config_write           = 0x0705,
        i40e_aqc_opc_oem_post_update            = 0x0720,
        i40e_aqc_opc_nvm_config_read            = 0x0704,
        i40e_aqc_opc_nvm_config_write           = 0x0705,
        i40e_aqc_opc_oem_post_update            = 0x0720,
+       i40e_aqc_opc_thermal_sensor             = 0x0721,
 
        /* virtualization commands */
        i40e_aqc_opc_send_msg_to_pf             = 0x0801,
 
        /* virtualization commands */
        i40e_aqc_opc_send_msg_to_pf             = 0x0801,
@@ -258,6 +264,10 @@ enum i40e_admin_queue_opc {
        /* Tunnel commands */
        i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
        i40e_aqc_opc_del_udp_tunnel     = 0x0B01,
        /* Tunnel commands */
        i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
        i40e_aqc_opc_del_udp_tunnel     = 0x0B01,
+       i40e_aqc_opc_set_rss_key        = 0x0B02,
+       i40e_aqc_opc_set_rss_lut        = 0x0B03,
+       i40e_aqc_opc_get_rss_key        = 0x0B04,
+       i40e_aqc_opc_get_rss_lut        = 0x0B05,
 
        /* Async Events */
        i40e_aqc_opc_event_lan_overflow         = 0x1001,
 
        /* Async Events */
        i40e_aqc_opc_event_lan_overflow         = 0x1001,
@@ -398,6 +408,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_OS2BMC_CAP      0x0004
 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
 #define I40E_AQ_CAP_ID_ALTERNATE_RAM   0x0006
 #define I40E_AQ_CAP_ID_OS2BMC_CAP      0x0004
 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
 #define I40E_AQ_CAP_ID_ALTERNATE_RAM   0x0006
+#define I40E_AQ_CAP_ID_WOL_AND_PROXY   0x0008
 #define I40E_AQ_CAP_ID_SRIOV           0x0012
 #define I40E_AQ_CAP_ID_VF              0x0013
 #define I40E_AQ_CAP_ID_VMDQ            0x0014
 #define I40E_AQ_CAP_ID_SRIOV           0x0012
 #define I40E_AQ_CAP_ID_VF              0x0013
 #define I40E_AQ_CAP_ID_VMDQ            0x0014
@@ -418,6 +429,8 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_LED             0x0061
 #define I40E_AQ_CAP_ID_SDP             0x0062
 #define I40E_AQ_CAP_ID_MDIO            0x0063
 #define I40E_AQ_CAP_ID_LED             0x0061
 #define I40E_AQ_CAP_ID_SDP             0x0062
 #define I40E_AQ_CAP_ID_MDIO            0x0063
+#define I40E_AQ_CAP_ID_WSR_PROT                0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT                0x0080
 #define I40E_AQ_CAP_ID_FLEX10          0x00F1
 #define I40E_AQ_CAP_ID_CEM             0x00F2
 
 #define I40E_AQ_CAP_ID_FLEX10          0x00F1
 #define I40E_AQ_CAP_ID_CEM             0x00F2
 
@@ -546,6 +559,41 @@ struct i40e_aqc_clear_pxe {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
 
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
 
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+       __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS   8
+       __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER                                0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL             0x4000
+       __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID           0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID    0x4000
+       u8 reserved[2];
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+       u8 reserved_1[2];
+       __le16 wake_reason;
+       u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
+struct i40e_aqc_set_wol_filter_data {
+       u8 filter[128];
+       u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
 /* Switch configuration commands (0x02xx) */
 
 /* Used by many indirect commands that only pass an seid and a buffer in the
 /* Switch configuration commands (0x02xx) */
 
 /* Used by many indirect commands that only pass an seid and a buffer in the
@@ -676,6 +724,31 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
 
 I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
 
 
 I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
 
+/* Set Switch Configuration (direct 0x0205) */
+struct i40e_aqc_set_switch_config {
+       __le16  flags;
+#define I40E_AQ_SET_SWITCH_CFG_PROMISC         0x0001
+#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER       0x0002
+       __le16  valid_flags;
+       u8      reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
+
+/* Read Receive control registers  (direct 0x0206)
+ * Write Receive control registers (direct 0x0207)
+ *     used for accessing Rx control registers that can be
+ *     slow and need special handling when under high Rx load
+ */
+struct i40e_aqc_rx_ctl_reg_read_write {
+       __le32 reserved1;
+       __le32 address;
+       __le32 reserved2;
+       __le32 value;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
+
 /* Add VSI (indirect 0x0210)
  *    this indirect command uses struct i40e_aqc_vsi_properties_data
  *    as the indirect buffer (128 bytes)
 /* Add VSI (indirect 0x0210)
  *    this indirect command uses struct i40e_aqc_vsi_properties_data
  *    as the indirect buffer (128 bytes)
@@ -822,14 +895,18 @@ struct i40e_aqc_vsi_properties_data {
                                         I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
        /* queueing option section */
        u8      queueing_opt_flags;
                                         I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
        /* queueing option section */
        u8      queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA  0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA    0x08
 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA    0x10
 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA   0x20
 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA    0x10
 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA   0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI        0x40
        u8      queueing_opt_reserved[3];
        /* scheduler section */
        u8      up_enable_bits;
        u8      sched_reserved;
        /* outer up section */
        u8      queueing_opt_reserved[3];
        /* scheduler section */
        u8      up_enable_bits;
        u8      sched_reserved;
        /* outer up section */
-       __le32  outer_up_table; /* same structure and defines as ingress table */
+       __le32  outer_up_table; /* same structure and defines as ingress tbl */
        u8      cmd_reserved[8];
        /* last 32 bytes are written by FW */
        __le16  qs_handle[8];
        u8      cmd_reserved[8];
        /* last 32 bytes are written by FW */
        __le16  qs_handle[8];
@@ -898,7 +975,8 @@ struct i40e_aqc_add_veb {
                                        I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
 #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT     0x2
 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA                0x4
                                        I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
 #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT     0x2
 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA                0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER      0x8
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER      0x8     /* deprecated */
+#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS  0x10
        u8      enable_tcs;
        u8      reserved[9];
 };
        u8      enable_tcs;
        u8      reserved[9];
 };
@@ -965,6 +1043,7 @@ struct i40e_aqc_add_macvlan_element_data {
 #define I40E_AQC_MACVLAN_ADD_HASH_MATCH                0x0002
 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN       0x0004
 #define I40E_AQC_MACVLAN_ADD_TO_QUEUE          0x0008
 #define I40E_AQC_MACVLAN_ADD_HASH_MATCH                0x0002
 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN       0x0004
 #define I40E_AQC_MACVLAN_ADD_TO_QUEUE          0x0008
+#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC    0x0010
        __le16  queue_number;
 #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT       0
 #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK                (0x7FF << \
        __le16  queue_number;
 #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT       0
 #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK                (0x7FF << \
@@ -1061,6 +1140,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
 #define I40E_AQC_SET_VSI_PROMISC_BROADCAST     0x04
 #define I40E_AQC_SET_VSI_DEFAULT               0x08
 #define I40E_AQC_SET_VSI_PROMISC_VLAN          0x10
 #define I40E_AQC_SET_VSI_PROMISC_BROADCAST     0x04
 #define I40E_AQC_SET_VSI_DEFAULT               0x08
 #define I40E_AQC_SET_VSI_PROMISC_VLAN          0x10
+#define I40E_AQC_SET_VSI_PROMISC_TX            0x8000
        __le16  seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK                0x3FF
        __le16  vlan_tag;
        __le16  seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK                0x3FF
        __le16  vlan_tag;
@@ -1249,10 +1329,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT              9
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK               0x1E00
 
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT              9
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK               0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN              0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN              0
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC         1
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC         1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                        2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE             2
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                 3
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                 3
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED           4
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE          5
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC      0x2000
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC      0x4000
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP       0x8000
 
        __le32  tenant_id;
        u8      reserved[4];
 
        __le32  tenant_id;
        u8      reserved[4];
@@ -1460,7 +1546,8 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
        u8      reserved1[28];
 };
 
        u8      reserved1[28];
 };
 
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+I40E_CHECK_STRUCT_LEN(0x40,
+                     i40e_aqc_configure_switching_comp_ets_bw_limit_data);
 
 /* Configure Switching Component Bandwidth Allocation per Tc
  * (indirect 0x0417)
 
 /* Configure Switching Component Bandwidth Allocation per Tc
  * (indirect 0x0417)
@@ -1535,27 +1622,6 @@ struct i40e_aqc_configure_partition_bw_data {
 
 I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
 
 
 I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
 
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
-       u8      pm_profile;
-       u8      pe_vf_enabled;
-       u8      reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
-       /* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
-       I40E_HMC_PROFILE_DEFAULT        = 1,
-       I40E_HMC_PROFILE_FAVOR_VF       = 2,
-       I40E_HMC_PROFILE_EQUAL          = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK       0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK    0x3F
-
 /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
 
 /* set in param0 for get phy abilities to report qualified modules */
 /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
 
 /* set in param0 for get phy abilities to report qualified modules */
@@ -1591,6 +1657,10 @@ enum i40e_aq_phy_type {
        I40E_PHY_TYPE_1000BASE_LX               = 0x1C,
        I40E_PHY_TYPE_1000BASE_T_OPTICAL        = 0x1D,
        I40E_PHY_TYPE_20GBASE_KR2               = 0x1E,
        I40E_PHY_TYPE_1000BASE_LX               = 0x1C,
        I40E_PHY_TYPE_1000BASE_T_OPTICAL        = 0x1D,
        I40E_PHY_TYPE_20GBASE_KR2               = 0x1E,
+       I40E_PHY_TYPE_25GBASE_KR                = 0x1F,
+       I40E_PHY_TYPE_25GBASE_CR                = 0x20,
+       I40E_PHY_TYPE_25GBASE_SR                = 0x21,
+       I40E_PHY_TYPE_25GBASE_LR                = 0x22,
        I40E_PHY_TYPE_MAX
 };
 
        I40E_PHY_TYPE_MAX
 };
 
@@ -1746,7 +1816,12 @@ struct i40e_aqc_get_link_status {
        u8      config;
 #define I40E_AQ_CONFIG_CRC_ENA         0x04
 #define I40E_AQ_CONFIG_PACING_MASK     0x78
        u8      config;
 #define I40E_AQ_CONFIG_CRC_ENA         0x04
 #define I40E_AQ_CONFIG_PACING_MASK     0x78
-       u8      reserved[5];
+       u8      external_power_ability;
+#define I40E_AQ_LINK_POWER_CLASS_1     0x00
+#define I40E_AQ_LINK_POWER_CLASS_2     0x01
+#define I40E_AQ_LINK_POWER_CLASS_3     0x02
+#define I40E_AQ_LINK_POWER_CLASS_4     0x03
+       u8      reserved[4];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -1802,7 +1877,10 @@ struct i40e_aqc_set_phy_debug {
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE  0x00
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD  0x01
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT  0x02
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE  0x00
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD  0x01
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT  0x02
+/* Disable link manageability on a single port */
 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW      0x10
 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW      0x10
+/* Disable link manageability on all ports needs both bits 4 and 5 */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW  0x20
        u8      reserved[15];
 };
 
        u8      reserved[15];
 };
 
@@ -1814,6 +1892,18 @@ enum i40e_aq_phy_reg_type {
        I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
 };
 
        I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
 };
 
+/* Run PHY Activity (0x0626) */
+struct i40e_aqc_run_phy_activity {
+       __le16  activity_id;
+       u8      flags;
+       u8      reserved1;
+       __le32  control;
+       __le32  data;
+       u8      reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+
 /* NVM Read command (indirect 0x0701)
  * NVM Erase commands (direct 0x0702)
  * NVM Update commands (indirect 0x0703)
 /* NVM Read command (indirect 0x0701)
  * NVM Erase commands (direct 0x0702)
  * NVM Update commands (indirect 0x0703)
@@ -1838,7 +1928,7 @@ struct i40e_aqc_nvm_config_read {
 #define I40E_AQ_ANVM_READ_SINGLE_FEATURE               0
 #define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES            1
        __le16  element_count;
 #define I40E_AQ_ANVM_READ_SINGLE_FEATURE               0
 #define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES            1
        __le16  element_count;
-       __le16  element_id;     /* Feature/field ID */
+       __le16  element_id;     /* Feature/field ID */
        __le16  element_id_msw; /* MSWord of field ID */
        __le32  address_high;
        __le32  address_low;
        __le16  element_id_msw; /* MSWord of field ID */
        __le32  address_high;
        __le32  address_low;
@@ -1859,9 +1949,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
 
 /* Used for 0x0704 as well as for 0x0705 commands */
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT                1
 
 /* Used for 0x0704 as well as for 0x0705 commands */
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT                1
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK         (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-#define I40E_AQ_ANVM_FEATURE                           0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD                   (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+                               (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE           0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD   (1 << FEATURE_OR_IMMEDIATE_SHIFT)
 struct i40e_aqc_nvm_config_data_feature {
        __le16 feature_id;
 #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY           0x01
 struct i40e_aqc_nvm_config_data_feature {
        __le16 feature_id;
 #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY           0x01
@@ -1885,7 +1976,7 @@ I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
 /* OEM Post Update (indirect 0x0720)
  * no command data struct used
  */
 /* OEM Post Update (indirect 0x0720)
  * no command data struct used
  */
- struct i40e_aqc_nvm_oem_post_update {
+struct i40e_aqc_nvm_oem_post_update {
 #define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA      0x01
        u8 sel_data;
        u8 reserved[7];
 #define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA      0x01
        u8 sel_data;
        u8 reserved[7];
@@ -1902,6 +1993,22 @@ struct i40e_aqc_nvm_oem_post_update_buffer {
 
 I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
 
 
 I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
 
+/* Thermal Sensor (indirect 0x0721)
+ *     read or set thermal sensor configs and values
+ *     takes a sensor and command specific data buffer, not detailed here
+ */
+struct i40e_aqc_thermal_sensor {
+       u8 sensor_action;
+#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG     0
+#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG      1
+#define I40E_AQ_THERMAL_SENSOR_READ_TEMP       2
+       u8 reserved[7];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
+
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
@@ -2146,12 +2253,21 @@ struct i40e_aqc_lldp_set_local_mib {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
 
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
 
+struct i40e_aqc_lldp_set_local_mib_resp {
+#define SET_LOCAL_MIB_RESP_EVENT_TRIGGERED_MASK      0x01
+       u8  status;
+       u8  reserved[15];
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp);
+
 /*     Stop/Start LLDP Agent (direct 0x0A09)
  *     Used for stopping/starting specific LLDP agent. e.g. DCBx
  */
 struct i40e_aqc_lldp_stop_start_specific_agent {
 #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT    0
 /*     Stop/Start LLDP Agent (direct 0x0A09)
  *     Used for stopping/starting specific LLDP agent. e.g. DCBx
  */
 struct i40e_aqc_lldp_stop_start_specific_agent {
 #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT    0
-#define I40E_AQC_START_SPECIFIC_AGENT_MASK     (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
+                               (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
        u8      command;
        u8      reserved[15];
 };
        u8      command;
        u8      reserved[15];
 };
@@ -2166,13 +2282,14 @@ struct i40e_aqc_add_udp_tunnel {
 #define I40E_AQC_TUNNEL_TYPE_VXLAN     0x00
 #define I40E_AQC_TUNNEL_TYPE_NGE       0x01
 #define I40E_AQC_TUNNEL_TYPE_TEREDO    0x10
 #define I40E_AQC_TUNNEL_TYPE_VXLAN     0x00
 #define I40E_AQC_TUNNEL_TYPE_NGE       0x01
 #define I40E_AQC_TUNNEL_TYPE_TEREDO    0x10
+#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
        u8      reserved1[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
 
 struct i40e_aqc_add_udp_tunnel_completion {
        u8      reserved1[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
 
 struct i40e_aqc_add_udp_tunnel_completion {
-       __le16 udp_port;
+       __le16  udp_port;
        u8      filter_entry_index;
        u8      multiple_pfs;
 #define I40E_AQC_SINGLE_PF             0x0
        u8      filter_entry_index;
        u8      multiple_pfs;
 #define I40E_AQC_SINGLE_PF             0x0
@@ -2202,6 +2319,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID         (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT      0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK       (0x3FF << \
+                                       I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+       __le16  vsi_id;
+       u8      reserved[6];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+       u8 standard_rss_key[0x28];
+       u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct  i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID         (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT      0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK       (0x3FF << \
+                                       I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+       __le16  vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT  0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK   (0x1 << \
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI    0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF     1
+       __le16  flags;
+       u8      reserved[4];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
 /* tunnel key structure 0x0B10 */
 
 struct i40e_aqc_tunnel_key_structure {
 /* tunnel key structure 0x0B10 */
 
 struct i40e_aqc_tunnel_key_structure {
@@ -2359,4 +2516,4 @@ struct i40e_aqc_debug_modify_internals {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
 
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
 
-#endif
+#endif /* _I40E_ADMINQ_CMD_H_ */
similarity index 88%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_alloc.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_alloc.h
index 2e7e3cf98c53723625206342f07d035375dabeb8..c4b5c6ee2e25679e01419275718891ec4ef3cab2 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
similarity index 82%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_common.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_common.c
index 2aded6bbd51a63d8a18d6e4636a539b894c56b68..c0b3dcc3ac786f7d686fd9ee48fe63d75f6d1b75 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -44,7 +41,6 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                switch (hw->device_id) {
                case I40E_DEV_ID_SFP_XL710:
                case I40E_DEV_ID_QEMU:
                switch (hw->device_id) {
                case I40E_DEV_ID_SFP_XL710:
                case I40E_DEV_ID_QEMU:
-               case I40E_DEV_ID_KX_A:
                case I40E_DEV_ID_KX_B:
                case I40E_DEV_ID_KX_C:
                case I40E_DEV_ID_QSFP_A:
                case I40E_DEV_ID_KX_B:
                case I40E_DEV_ID_KX_C:
                case I40E_DEV_ID_QSFP_A:
@@ -54,11 +50,18 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_10G_BASE_T4:
                case I40E_DEV_ID_20G_KR2:
                case I40E_DEV_ID_20G_KR2_A:
                case I40E_DEV_ID_10G_BASE_T4:
                case I40E_DEV_ID_20G_KR2:
                case I40E_DEV_ID_20G_KR2_A:
+               case I40E_DEV_ID_25G_B:
+               case I40E_DEV_ID_25G_SFP28:
                        hw->mac.type = I40E_MAC_XL710;
                        break;
                        hw->mac.type = I40E_MAC_XL710;
                        break;
-               case I40E_DEV_ID_VF:
-               case I40E_DEV_ID_VF_HV:
-                       hw->mac.type = I40E_MAC_VF;
+               case I40E_DEV_ID_KX_X722:
+               case I40E_DEV_ID_QSFP_X722:
+               case I40E_DEV_ID_SFP_X722:
+               case I40E_DEV_ID_1G_BASE_T_X722:
+               case I40E_DEV_ID_10G_BASE_T_X722:
+               case I40E_DEV_ID_SFP_I_X722:
+               case I40E_DEV_ID_QSFP_I_X722:
+                       hw->mac.type = I40E_MAC_X722;
                        break;
                default:
                        hw->mac.type = I40E_MAC_GENERIC;
                        break;
                default:
                        hw->mac.type = I40E_MAC_GENERIC;
@@ -328,16 +331,21 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
                                   buf[i+4], buf[i+5], buf[i+6], buf[i+7],
                                   buf[i+8], buf[i+9], buf[i+10], buf[i+11],
                                   buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
                                   buf[i+4], buf[i+5], buf[i+6], buf[i+7],
                                   buf[i+8], buf[i+9], buf[i+10], buf[i+11],
                                   buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
-               /* write whatever's left over without overrunning the buffer */
+               /* the most we could have left is 16 bytes, pad with zeros */
                if (i < len) {
                if (i < len) {
-                       char d_buf[80];
-                       int j = 0;
+                       char d_buf[16];
+                       int j, i_sav;
 
 
+                       i_sav = i;
                        memset(d_buf, 0, sizeof(d_buf));
                        memset(d_buf, 0, sizeof(d_buf));
-                       j += sprintf(d_buf, "\t0x%04X ", i);
-                       while (i < len)
-                               j += sprintf(&d_buf[j], " %02X", buf[i++]);
-                       i40e_debug(hw, mask, "%s\n", d_buf);
+                       for (j = 0; i < len; j++, i++)
+                               d_buf[j] = buf[i];
+                       i40e_debug(hw, mask,
+                                  "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+                                  i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+                                  d_buf[4], d_buf[5], d_buf[6], d_buf[7],
+                                  d_buf[8], d_buf[9], d_buf[10], d_buf[11],
+                                  d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
                }
        }
 }
                }
        }
 }
@@ -382,6 +390,164 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
        return status;
 }
 
        return status;
 }
 
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+                                                    u16 vsi_id, bool pf_lut,
+                                                    u8 *lut, u16 lut_size,
+                                                    bool set)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_set_rss_lut *cmd_resp =
+                  (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+       if (set)
+               i40e_fill_default_direct_cmd_desc(&desc,
+                                                 i40e_aqc_opc_set_rss_lut);
+       else
+               i40e_fill_default_direct_cmd_desc(&desc,
+                                                 i40e_aqc_opc_get_rss_lut);
+
+       /* Indirect command */
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+       cmd_resp->vsi_id =
+                       CPU_TO_LE16((u16)((vsi_id <<
+                                         I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+                                         I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+       cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+       if (pf_lut)
+               cmd_resp->flags |= CPU_TO_LE16((u16)
+                                       ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+       else
+               cmd_resp->flags |= CPU_TO_LE16((u16)
+                                       ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+                                       I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+       status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+                                         bool pf_lut, u8 *lut, u16 lut_size)
+{
+       return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+                                      false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+                                         bool pf_lut, u8 *lut, u16 lut_size)
+{
+       return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+                                     u16 vsi_id,
+                                     struct i40e_aqc_get_set_rss_key_data *key,
+                                     bool set)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_set_rss_key *cmd_resp =
+                       (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+       u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+       if (set)
+               i40e_fill_default_direct_cmd_desc(&desc,
+                                                 i40e_aqc_opc_set_rss_key);
+       else
+               i40e_fill_default_direct_cmd_desc(&desc,
+                                                 i40e_aqc_opc_get_rss_key);
+
+       /* Indirect command */
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+       cmd_resp->vsi_id =
+                       CPU_TO_LE16((u16)((vsi_id <<
+                                         I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+                                         I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+       cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+       status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ **/
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+                                     u16 vsi_id,
+                                     struct i40e_aqc_get_set_rss_key_data *key)
+{
+       return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+                                     u16 vsi_id,
+                                     struct i40e_aqc_get_set_rss_key_data *key)
+{
+       return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
  * packet type.
 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
  * packet type.
@@ -761,7 +927,7 @@ i40e_status i40e_validate_mac_addr(u8 *mac_addr)
         * Make sure it is not a multicast address
         * Reject the zero address
         */
         * Make sure it is not a multicast address
         * Reject the zero address
         */
-       if (I40E_IS_MULTICAST(mac_addr) ||
+       if (is_multicast_ether_addr(mac_addr) ||
            (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
              mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
                status = I40E_ERR_INVALID_MAC_ADDR;
            (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
              mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
                status = I40E_ERR_INVALID_MAC_ADDR;
@@ -790,6 +956,7 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
 
        switch (hw->mac.type) {
        case I40E_MAC_XL710:
 
        switch (hw->mac.type) {
        case I40E_MAC_XL710:
+       case I40E_MAC_X722:
                break;
        default:
                return I40E_ERR_DEVICE_NOT_SUPPORTED;
                break;
        default:
                return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -809,6 +976,9 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
        else
                hw->pf_id = (u8)(func_rid & 0x7);
 
        else
                hw->pf_id = (u8)(func_rid & 0x7);
 
+       if (hw->mac.type == I40E_MAC_X722)
+               hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+
        status = i40e_init_nvm(hw);
        return status;
 }
        status = i40e_init_nvm(hw);
        return status;
 }
@@ -1108,7 +1278,10 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
        grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
                        I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
                        I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
        grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
                        I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
                        I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
-       for (cnt = 0; cnt < grst_del + 10; cnt++) {
+
+       grst_del = grst_del * 20;
+
+       for (cnt = 0; cnt < grst_del; cnt++) {
                reg = rd32(hw, I40E_GLGEN_RSTAT);
                if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
                        break;
                reg = rd32(hw, I40E_GLGEN_RSTAT);
                if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
                        break;
@@ -2002,10 +2175,12 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
  * @seid: vsi number
  * @set: set unicast promiscuous enable/disable
  * @cmd_details: pointer to command details structure or NULL
  * @seid: vsi number
  * @set: set unicast promiscuous enable/disable
  * @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
  **/
 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
                                u16 seid, bool set,
  **/
 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
                                u16 seid, bool set,
-                               struct i40e_asq_cmd_details *cmd_details)
+                               struct i40e_asq_cmd_details *cmd_details,
+                               bool rx_only_promisc)
 {
        struct i40e_aq_desc desc;
        struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
 {
        struct i40e_aq_desc desc;
        struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -2016,12 +2191,20 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
        i40e_fill_default_direct_cmd_desc(&desc,
                                        i40e_aqc_opc_set_vsi_promiscuous_modes);
 
        i40e_fill_default_direct_cmd_desc(&desc,
                                        i40e_aqc_opc_set_vsi_promiscuous_modes);
 
-       if (set)
+       if (set) {
                flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
                flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+               if (rx_only_promisc &&
+                   (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+                    (hw->aq.api_maj_ver > 1)))
+                       flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+       }
 
        cmd->promiscuous_flags = CPU_TO_LE16(flags);
 
        cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
 
        cmd->promiscuous_flags = CPU_TO_LE16(flags);
 
        cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+       if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
+            (hw->aq.api_maj_ver > 1))
+               cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX);
 
        cmd->seid = CPU_TO_LE16(seid);
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
        cmd->seid = CPU_TO_LE16(seid);
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2164,6 +2347,37 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
        return status;
 }
 
        return status;
 }
 
+/**
+ * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+                               u16 seid, bool enable,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+       i40e_status status;
+       u16 flags = 0;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_set_vsi_promiscuous_modes);
+       if (enable)
+               flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
+       
+       cmd->promiscuous_flags = CPU_TO_LE16(flags);
+       cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
+       cmd->seid = CPU_TO_LE16(seid);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
 /**
  * i40e_get_vsi_params - get VSI configuration info
  * @hw: pointer to the hw struct
 /**
  * i40e_get_vsi_params - get VSI configuration info
  * @hw: pointer to the hw struct
@@ -2219,6 +2433,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
        struct i40e_aq_desc desc;
        struct i40e_aqc_add_get_update_vsi *cmd =
                (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
        struct i40e_aq_desc desc;
        struct i40e_aqc_add_get_update_vsi *cmd =
                (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+       struct i40e_aqc_add_get_update_vsi_completion *resp =
+               (struct i40e_aqc_add_get_update_vsi_completion *)
+               &desc.params.raw;
        i40e_status status;
 
        i40e_fill_default_direct_cmd_desc(&desc,
        i40e_status status;
 
        i40e_fill_default_direct_cmd_desc(&desc,
@@ -2230,6 +2447,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
        status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
                                    sizeof(vsi_ctx->info), cmd_details);
 
        status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
                                    sizeof(vsi_ctx->info), cmd_details);
 
+       vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+       vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
        return status;
 }
 
        return status;
 }
 
@@ -2266,6 +2486,34 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
        return status;
 }
 
        return status;
 }
 
+/**
+ * i40e_aq_set_switch_config
+ * @hw: pointer to the hardware structure
+ * @flags: bit flag values to set
+ * @valid_flags: which bit flags to set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set switch configuration bits
+ **/
+i40e_status i40e_aq_set_switch_config(struct i40e_hw *hw,
+                               u16 flags, u16 valid_flags,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_switch_config *scfg =
+               (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_switch_config);
+       scfg->flags = CPU_TO_LE16(flags);
+       scfg->valid_flags = CPU_TO_LE16(valid_flags);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
 /**
  * i40e_aq_get_firmware_version
  * @hw: pointer to the hw struct
 /**
  * i40e_aq_get_firmware_version
  * @hw: pointer to the hw struct
@@ -2406,7 +2654,6 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
                memcpy(hw->phy.link_info.module_type, &abilities.module_type,
                        sizeof(hw->phy.link_info.module_type));
        }
                memcpy(hw->phy.link_info.module_type, &abilities.module_type,
                        sizeof(hw->phy.link_info.module_type));
        }
-
        return status;
 }
 
        return status;
 }
 
@@ -2442,8 +2689,8 @@ i40e_link_speed_exit:
  * @downlink_seid: the VSI SEID
  * @enabled_tc: bitmap of TCs to be enabled
  * @default_port: true for default port VSI, false for control port
  * @downlink_seid: the VSI SEID
  * @enabled_tc: bitmap of TCs to be enabled
  * @default_port: true for default port VSI, false for control port
- * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support
  * @veb_seid: pointer to where to put the resulting VEB SEID
  * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @enable_stats: true to turn on VEB stats
  * @cmd_details: pointer to command details structure or NULL
  *
  * This asks the FW to add a VEB between the uplink and downlink
  * @cmd_details: pointer to command details structure or NULL
  *
  * This asks the FW to add a VEB between the uplink and downlink
@@ -2451,8 +2698,8 @@ i40e_link_speed_exit:
  **/
 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
                                u16 downlink_seid, u8 enabled_tc,
  **/
 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
                                u16 downlink_seid, u8 enabled_tc,
-                               bool default_port, bool enable_l2_filtering,
-                               u16 *veb_seid,
+                               bool default_port, u16 *veb_seid,
+                               bool enable_stats,
                                struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
                                struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
@@ -2479,8 +2726,9 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
        else
                veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
 
        else
                veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
 
-       if (enable_l2_filtering)
-               veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+       /* reverse logic here: set the bitflag to disable the stats */
+       if (!enable_stats)
+               veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
 
        cmd->veb_flags = CPU_TO_LE16(veb_flags);
 
 
        cmd->veb_flags = CPU_TO_LE16(veb_flags);
 
@@ -2569,6 +2817,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
                (struct i40e_aqc_macvlan *)&desc.params.raw;
        i40e_status status;
        u16 buf_size;
                (struct i40e_aqc_macvlan *)&desc.params.raw;
        i40e_status status;
        u16 buf_size;
+       int i;
 
        if (count == 0 || !mv_list || !hw)
                return I40E_ERR_PARAM;
 
        if (count == 0 || !mv_list || !hw)
                return I40E_ERR_PARAM;
@@ -2582,12 +2831,17 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
        cmd->seid[1] = 0;
        cmd->seid[2] = 0;
 
        cmd->seid[1] = 0;
        cmd->seid[2] = 0;
 
+       for (i = 0; i < count; i++)
+               if (is_multicast_ether_addr(mv_list[i].mac_addr))
+                       mv_list[i].flags |=
+                           CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
+
        desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
        if (buf_size > I40E_AQ_LARGE_BUF)
                desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
        status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
        desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
        if (buf_size > I40E_AQ_LARGE_BUF)
                desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
 
        status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
-                                   cmd_details);
+                                      cmd_details);
 
        return status;
 }
 
        return status;
 }
@@ -2634,6 +2888,134 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
        return status;
 }
 
        return status;
 }
 
+/**
+ * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
+ * @hw: pointer to the hw struct
+ * @opcode: AQ opcode for add or delete mirror rule
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @id: Destination VSI SEID or Rule ID
+ * @count: length of the list
+ * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_id: Rule ID returned from FW
+ * @rule_used: Number of rules used in internal switch
+ * @rule_free: Number of rules free in internal switch
+ *
+ * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
+ * VEBs/VEPA elements only
+ **/
+static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
+                       u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+                       u16 count, __le16 *mr_list,
+                       struct i40e_asq_cmd_details *cmd_details,
+                       u16 *rule_id, u16 *rules_used, u16 *rules_free)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_delete_mirror_rule *cmd =
+               (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
+       struct i40e_aqc_add_delete_mirror_rule_completion *resp =
+       (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
+       i40e_status status;
+       u16 buf_size;
+
+       buf_size = count * sizeof(*mr_list);
+
+       /* prep the rest of the request */
+       i40e_fill_default_direct_cmd_desc(&desc, opcode);
+       cmd->seid = CPU_TO_LE16(sw_seid);
+       cmd->rule_type = CPU_TO_LE16(rule_type &
+                                    I40E_AQC_MIRROR_RULE_TYPE_MASK);
+       cmd->num_entries = CPU_TO_LE16(count);
+       /* Dest VSI for add, rule_id for delete */
+       cmd->destination = CPU_TO_LE16(id);
+       if (mr_list) {
+               desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+                                               I40E_AQ_FLAG_RD));
+               if (buf_size > I40E_AQ_LARGE_BUF)
+                       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+       }
+
+       status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
+                                      cmd_details);
+       if (status == I40E_SUCCESS ||
+           hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
+               if (rule_id)
+                       *rule_id = LE16_TO_CPU(resp->rule_id);
+               if (rules_used)
+                       *rules_used = LE16_TO_CPU(resp->mirror_rules_used);
+               if (rules_free)
+                       *rules_free = LE16_TO_CPU(resp->mirror_rules_free);
+       }
+       return status;
+}
+
+/**
+ * i40e_aq_add_mirrorrule - add a mirror rule
+ * @hw: pointer to the hw struct
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @dest_vsi: SEID of VSI to which packets will be mirrored
+ * @count: length of the list
+ * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_id: Rule ID returned from FW
+ * @rule_used: Number of rules used in internal switch
+ * @rule_free: Number of rules free in internal switch
+ *
+ * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
+ **/
+i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+                       u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+                       struct i40e_asq_cmd_details *cmd_details,
+                       u16 *rule_id, u16 *rules_used, u16 *rules_free)
+{
+       if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
+           rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
+               if (count == 0 || !mr_list)
+                       return I40E_ERR_PARAM;
+       }
+
+       return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
+                                 rule_type, dest_vsi, count, mr_list,
+                                 cmd_details, rule_id, rules_used, rules_free);
+}
+
+/**
+ * i40e_aq_delete_mirrorrule - delete a mirror rule
+ * @hw: pointer to the hw struct
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @count: length of the list
+ * @rule_id: Rule ID that is returned in the receive desc as part of
+ *             add_mirrorrule.
+ * @mr_list: list of mirrored VLAN IDs to be removed
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_used: Number of rules used in internal switch
+ * @rule_free: Number of rules free in internal switch
+ *
+ * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
+ **/
+i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+                       u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+                       struct i40e_asq_cmd_details *cmd_details,
+                       u16 *rules_used, u16 *rules_free)
+{
+       /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
+       if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
+               /* count and mr_list shall be valid for rule_type INGRESS VLAN
+                * mirroring. For other rule_type, count and rule_type should
+                * not matter.
+                */
+               if (count == 0 || !mr_list)
+                       return I40E_ERR_PARAM;
+       }
+
+       return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
+                                 rule_type, rule_id, count, mr_list,
+                                 cmd_details, NULL, rules_used, rules_free);
+}
+
 /**
  * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
  * @hw: pointer to the hw struct
 /**
  * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
  * @hw: pointer to the hw struct
@@ -2816,67 +3198,6 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
        return status;
 }
 
        return status;
 }
 
-/**
- * i40e_aq_get_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * query the HMC profile of the device.
- **/
-i40e_status i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
-                               enum i40e_aq_hmc_profile *profile,
-                               u8 *pe_vf_enabled_count,
-                               struct i40e_asq_cmd_details *cmd_details)
-{
-       struct i40e_aq_desc desc;
-       struct i40e_aq_get_set_hmc_resource_profile *resp =
-               (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
-       i40e_status status;
-
-       i40e_fill_default_direct_cmd_desc(&desc,
-                               i40e_aqc_opc_query_hmc_resource_profile);
-       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-       *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile &
-                  I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK);
-       *pe_vf_enabled_count = resp->pe_vf_enabled &
-                              I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK;
-
-       return status;
-}
-
-/**
- * i40e_aq_set_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * set the HMC profile of the device.
- **/
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
-                               enum i40e_aq_hmc_profile profile,
-                               u8 pe_vf_enabled_count,
-                               struct i40e_asq_cmd_details *cmd_details)
-{
-       struct i40e_aq_desc desc;
-       struct i40e_aq_get_set_hmc_resource_profile *cmd =
-               (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
-       i40e_status status;
-
-       i40e_fill_default_direct_cmd_desc(&desc,
-                                       i40e_aqc_opc_set_hmc_resource_profile);
-
-       cmd->pm_profile = (u8)profile;
-       cmd->pe_vf_enabled = pe_vf_enabled_count;
-
-       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-       return status;
-}
-
 /**
  * i40e_aq_request_resource
  * @hw: pointer to the hw struct
 /**
  * i40e_aq_request_resource
  * @hw: pointer to the hw struct
@@ -3127,35 +3448,6 @@ i40e_aq_erase_nvm_exit:
        return status;
 }
 
        return status;
 }
 
-#define I40E_DEV_FUNC_CAP_SWITCH_MODE  0x01
-#define I40E_DEV_FUNC_CAP_MGMT_MODE    0x02
-#define I40E_DEV_FUNC_CAP_NPAR         0x03
-#define I40E_DEV_FUNC_CAP_OS2BMC       0x04
-#define I40E_DEV_FUNC_CAP_VALID_FUNC   0x05
-#define I40E_DEV_FUNC_CAP_SRIOV_1_1    0x12
-#define I40E_DEV_FUNC_CAP_VF           0x13
-#define I40E_DEV_FUNC_CAP_VMDQ         0x14
-#define I40E_DEV_FUNC_CAP_802_1_QBG    0x15
-#define I40E_DEV_FUNC_CAP_802_1_QBH    0x16
-#define I40E_DEV_FUNC_CAP_VSI          0x17
-#define I40E_DEV_FUNC_CAP_DCB          0x18
-#define I40E_DEV_FUNC_CAP_FCOE         0x21
-#define I40E_DEV_FUNC_CAP_ISCSI                0x22
-#define I40E_DEV_FUNC_CAP_RSS          0x40
-#define I40E_DEV_FUNC_CAP_RX_QUEUES    0x41
-#define I40E_DEV_FUNC_CAP_TX_QUEUES    0x42
-#define I40E_DEV_FUNC_CAP_MSIX         0x43
-#define I40E_DEV_FUNC_CAP_MSIX_VF      0x44
-#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR        0x45
-#define I40E_DEV_FUNC_CAP_IEEE_1588    0x46
-#define I40E_DEV_FUNC_CAP_FLEX10       0xF1
-#define I40E_DEV_FUNC_CAP_CEM          0xF2
-#define I40E_DEV_FUNC_CAP_IWARP                0x51
-#define I40E_DEV_FUNC_CAP_LED          0x61
-#define I40E_DEV_FUNC_CAP_SDP          0x62
-#define I40E_DEV_FUNC_CAP_MDIO         0x63
-#define I40E_DEV_FUNC_CAP_WR_CSR_PROT  0x64
-
 /**
  * i40e_parse_discover_capabilities
  * @hw: pointer to the hw struct
 /**
  * i40e_parse_discover_capabilities
  * @hw: pointer to the hw struct
@@ -3194,79 +3486,146 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                major_rev = cap->major_rev;
 
                switch (id) {
                major_rev = cap->major_rev;
 
                switch (id) {
-               case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+               case I40E_AQ_CAP_ID_SWITCH_MODE:
                        p->switch_mode = number;
                        p->switch_mode = number;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: Switch mode = %d\n",
+                                  p->switch_mode);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_MGMT_MODE:
+               case I40E_AQ_CAP_ID_MNG_MODE:
                        p->management_mode = number;
                        p->management_mode = number;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: Management Mode = %d\n",
+                                  p->management_mode);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_NPAR:
+               case I40E_AQ_CAP_ID_NPAR_ACTIVE:
                        p->npar_enable = number;
                        p->npar_enable = number;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: NPAR enable = %d\n",
+                                  p->npar_enable);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_OS2BMC:
+               case I40E_AQ_CAP_ID_OS2BMC_CAP:
                        p->os2bmc = number;
                        p->os2bmc = number;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: OS2BMC = %d\n", p->os2bmc);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_VALID_FUNC:
+               case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
                        p->valid_functions = number;
                        p->valid_functions = number;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: Valid Functions = %d\n",
+                                  p->valid_functions);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+               case I40E_AQ_CAP_ID_SRIOV:
                        if (number == 1)
                                p->sr_iov_1_1 = true;
                        if (number == 1)
                                p->sr_iov_1_1 = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: SR-IOV = %d\n",
+                                  p->sr_iov_1_1);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_VF:
+               case I40E_AQ_CAP_ID_VF:
                        p->num_vfs = number;
                        p->vf_base_id = logical_id;
                        p->num_vfs = number;
                        p->vf_base_id = logical_id;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: VF count = %d\n",
+                                  p->num_vfs);
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: VF base_id = %d\n",
+                                  p->vf_base_id);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_VMDQ:
+               case I40E_AQ_CAP_ID_VMDQ:
                        if (number == 1)
                                p->vmdq = true;
                        if (number == 1)
                                p->vmdq = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: VMDQ = %d\n", p->vmdq);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_802_1_QBG:
+               case I40E_AQ_CAP_ID_8021QBG:
                        if (number == 1)
                                p->evb_802_1_qbg = true;
                        if (number == 1)
                                p->evb_802_1_qbg = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: 802.1Qbg = %d\n", number);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_802_1_QBH:
+               case I40E_AQ_CAP_ID_8021QBR:
                        if (number == 1)
                                p->evb_802_1_qbh = true;
                        if (number == 1)
                                p->evb_802_1_qbh = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: 802.1Qbh = %d\n", number);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_VSI:
+               case I40E_AQ_CAP_ID_VSI:
                        p->num_vsis = number;
                        p->num_vsis = number;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: VSI count = %d\n",
+                                  p->num_vsis);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_DCB:
+               case I40E_AQ_CAP_ID_DCB:
                        if (number == 1) {
                                p->dcb = true;
                                p->enabled_tcmap = logical_id;
                                p->maxtc = phys_id;
                        }
                        if (number == 1) {
                                p->dcb = true;
                                p->enabled_tcmap = logical_id;
                                p->maxtc = phys_id;
                        }
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: DCB = %d\n", p->dcb);
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: TC Mapping = %d\n",
+                                  logical_id);
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: TC Max = %d\n", p->maxtc);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_FCOE:
+               case I40E_AQ_CAP_ID_FCOE:
                        if (number == 1)
                                p->fcoe = true;
                        if (number == 1)
                                p->fcoe = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: FCOE = %d\n", p->fcoe);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_ISCSI:
+               case I40E_AQ_CAP_ID_ISCSI:
                        if (number == 1)
                                p->iscsi = true;
                        if (number == 1)
                                p->iscsi = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: iSCSI = %d\n", p->iscsi);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_RSS:
+               case I40E_AQ_CAP_ID_RSS:
                        p->rss = true;
                        p->rss_table_size = number;
                        p->rss_table_entry_width = logical_id;
                        p->rss = true;
                        p->rss_table_size = number;
                        p->rss_table_entry_width = logical_id;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: RSS = %d\n", p->rss);
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: RSS table size = %d\n",
+                                  p->rss_table_size);
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: RSS table width = %d\n",
+                                  p->rss_table_entry_width);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_RX_QUEUES:
+               case I40E_AQ_CAP_ID_RXQ:
                        p->num_rx_qp = number;
                        p->base_queue = phys_id;
                        p->num_rx_qp = number;
                        p->base_queue = phys_id;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: Rx QP = %d\n", number);
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: base_queue = %d\n",
+                                  p->base_queue);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_TX_QUEUES:
+               case I40E_AQ_CAP_ID_TXQ:
                        p->num_tx_qp = number;
                        p->base_queue = phys_id;
                        p->num_tx_qp = number;
                        p->base_queue = phys_id;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: Tx QP = %d\n", number);
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: base_queue = %d\n",
+                                  p->base_queue);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_MSIX:
+               case I40E_AQ_CAP_ID_MSIX:
                        p->num_msix_vectors = number;
                        p->num_msix_vectors = number;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: MSIX vector count = %d\n",
+                                  p->num_msix_vectors);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_MSIX_VF:
+               case I40E_AQ_CAP_ID_VF_MSIX:
                        p->num_msix_vectors_vf = number;
                        p->num_msix_vectors_vf = number;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: MSIX VF vector count = %d\n",
+                                  p->num_msix_vectors_vf);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_FLEX10:
+               case I40E_AQ_CAP_ID_FLEX10:
                        if (major_rev == 1) {
                                if (number == 1) {
                                        p->flex10_enable = true;
                        if (major_rev == 1) {
                                if (number == 1) {
                                        p->flex10_enable = true;
@@ -3281,41 +3640,92 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                        }
                        p->flex10_mode = logical_id;
                        p->flex10_status = phys_id;
                        }
                        p->flex10_mode = logical_id;
                        p->flex10_status = phys_id;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: Flex10 mode = %d\n",
+                                  p->flex10_mode);
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: Flex10 status = %d\n",
+                                  p->flex10_status);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_CEM:
+               case I40E_AQ_CAP_ID_CEM:
                        if (number == 1)
                                p->mgmt_cem = true;
                        if (number == 1)
                                p->mgmt_cem = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: CEM = %d\n", p->mgmt_cem);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_IWARP:
+               case I40E_AQ_CAP_ID_IWARP:
                        if (number == 1)
                                p->iwarp = true;
                        if (number == 1)
                                p->iwarp = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: iWARP = %d\n", p->iwarp);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_LED:
+               case I40E_AQ_CAP_ID_LED:
                        if (phys_id < I40E_HW_CAP_MAX_GPIO)
                                p->led[phys_id] = true;
                        if (phys_id < I40E_HW_CAP_MAX_GPIO)
                                p->led[phys_id] = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: LED - PIN %d\n", phys_id);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_SDP:
+               case I40E_AQ_CAP_ID_SDP:
                        if (phys_id < I40E_HW_CAP_MAX_GPIO)
                                p->sdp[phys_id] = true;
                        if (phys_id < I40E_HW_CAP_MAX_GPIO)
                                p->sdp[phys_id] = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: SDP - PIN %d\n", phys_id);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_MDIO:
+               case I40E_AQ_CAP_ID_MDIO:
                        if (number == 1) {
                                p->mdio_port_num = phys_id;
                                p->mdio_port_mode = logical_id;
                        }
                        if (number == 1) {
                                p->mdio_port_num = phys_id;
                                p->mdio_port_mode = logical_id;
                        }
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: MDIO port number = %d\n",
+                                  p->mdio_port_num);
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: MDIO port mode = %d\n",
+                                  p->mdio_port_mode);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_IEEE_1588:
+               case I40E_AQ_CAP_ID_1588:
                        if (number == 1)
                                p->ieee_1588 = true;
                        if (number == 1)
                                p->ieee_1588 = true;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: IEEE 1588 = %d\n",
+                                  p->ieee_1588);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+               case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
                        p->fd = true;
                        p->fd_filters_guaranteed = number;
                        p->fd_filters_best_effort = logical_id;
                        p->fd = true;
                        p->fd_filters_guaranteed = number;
                        p->fd_filters_best_effort = logical_id;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: Flow Director = 1\n");
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: Guaranteed FD filters = %d\n",
+                                  p->fd_filters_guaranteed);
                        break;
                        break;
-               case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+               case I40E_AQ_CAP_ID_WSR_PROT:
                        p->wr_csr_prot = (u64)number;
                        p->wr_csr_prot |= (u64)logical_id << 32;
                        p->wr_csr_prot = (u64)number;
                        p->wr_csr_prot |= (u64)logical_id << 32;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: wr_csr_prot = 0x%llX\n\n",
+                                  (p->wr_csr_prot & 0xffff));
+                       break;
+               case I40E_AQ_CAP_ID_NVM_MGMT:
+                       if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+                               p->sec_rev_disabled = true;
+                       if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+                               p->update_disabled = true;
+                       break;
+               case I40E_AQ_CAP_ID_WOL_AND_PROXY:
+                       hw->num_wol_proxy_filters = (u16)number;
+                       hw->wol_proxy_vsi_seid = (u16)logical_id;
+                       p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK;
+                       if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK)
+                               p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK;
+                       else
+                               p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
+                       p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
+                       p->proxy_support = p->proxy_support;
+                       i40e_debug(hw, I40E_DEBUG_INIT,
+                                  "HW Capability: WOL proxy filters = %d\n",
+                                  hw->num_wol_proxy_filters);
                        break;
                default:
                        break;
                        break;
                default:
                        break;
@@ -3967,7 +4377,7 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
 }
 
 /**
 }
 
 /**
- * i40_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
  * @hw: pointer to the hw struct
  * @flags: component flags
  * @mac_seid: uplink seid (MAC SEID)
  * @hw: pointer to the hw struct
  * @flags: component flags
  * @mac_seid: uplink seid (MAC SEID)
@@ -4837,7 +5247,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
                return ret;
 
        /* Read the PF Queue Filter control register */
                return ret;
 
        /* Read the PF Queue Filter control register */
-       val = rd32(hw, I40E_PFQF_CTL_0);
+       val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
 
        /* Program required PE hash buckets for the PF */
        val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
 
        /* Program required PE hash buckets for the PF */
        val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
@@ -4874,7 +5284,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
        if (settings->enable_macvlan)
                val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
 
        if (settings->enable_macvlan)
                val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
 
-       wr32(hw, I40E_PFQF_CTL_0, val);
+       i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
 
        return I40E_SUCCESS;
 }
 
        return I40E_SUCCESS;
 }
@@ -4957,13 +5367,42 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
        u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
        i40e_status status;
 
        u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
        i40e_status status;
 
-       status = i40e_aq_add_rem_control_packet_filter(hw, 0, ethtype, flag,
+       status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
                                                       seid, 0, true, NULL,
                                                       NULL);
        if (status)
                hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
 }
 
                                                       seid, 0, true, NULL,
                                                       NULL);
        if (status)
                hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
 }
 
+/**
+ * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue
+ * @filters: list of cloud filters
+ * @filter_count: length of list
+ *
+ * There's an issue in the device where the Geneve VNI layout needs
+ * to be shifted 1 byte over from the VxLAN VNI
+ **/
+static void i40e_fix_up_geneve_vni(
+       struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+       u8 filter_count)
+{
+       struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters;
+       int i;
+
+       for (i = 0; i < filter_count; i++) {
+               u16 tnl_type;
+               u32 ti;
+
+               tnl_type = (le16_to_cpu(f[i].flags) &
+                          I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+                          I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+               if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+                       ti = le32_to_cpu(f[i].tenant_id);
+                       f[i].tenant_id = cpu_to_le32(ti << 8);
+               }
+       }
+}
+
 /**
  * i40e_aq_add_cloud_filters
  * @hw: pointer to the hardware structure
 /**
  * i40e_aq_add_cloud_filters
  * @hw: pointer to the hardware structure
@@ -4984,8 +5423,8 @@ i40e_status i40e_aq_add_cloud_filters(struct i40e_hw *hw,
        struct i40e_aq_desc desc;
        struct i40e_aqc_add_remove_cloud_filters *cmd =
        (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
        struct i40e_aq_desc desc;
        struct i40e_aqc_add_remove_cloud_filters *cmd =
        (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
-       u16 buff_len;
        i40e_status status;
        i40e_status status;
+       u16 buff_len;
 
        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_add_cloud_filters);
 
        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_add_cloud_filters);
@@ -4996,6 +5435,8 @@ i40e_status i40e_aq_add_cloud_filters(struct i40e_hw *hw,
        cmd->num_filters = filter_count;
        cmd->seid = CPU_TO_LE16(seid);
 
        cmd->num_filters = filter_count;
        cmd->seid = CPU_TO_LE16(seid);
 
+       i40e_fix_up_geneve_vni(filters, filter_count);
+
        status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
 
        return status;
        status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
 
        return status;
@@ -5033,6 +5474,8 @@ i40e_status i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
        cmd->num_filters = filter_count;
        cmd->seid = CPU_TO_LE16(seid);
 
        cmd->num_filters = filter_count;
        cmd->seid = CPU_TO_LE16(seid);
 
+       i40e_fix_up_geneve_vni(filters, filter_count);
+
        status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
 
        return status;
        status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
 
        return status;
@@ -5103,8 +5546,6 @@ i40e_status i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
 
        cmd_resp->address = CPU_TO_LE32(addr);
        cmd_resp->length = CPU_TO_LE32(dw_count);
 
        cmd_resp->address = CPU_TO_LE32(addr);
        cmd_resp->length = CPU_TO_LE32(dw_count);
-       cmd_resp->addr_high = CPU_TO_LE32(upper_32_bits((u64)buffer));
-       cmd_resp->addr_low = CPU_TO_LE32(lower_32_bits((u64)buffer));
 
        status = i40e_asq_send_command(hw, &desc, buffer,
                                       lower_32_bits(4*dw_count), NULL);
 
        status = i40e_asq_send_command(hw, &desc, buffer,
                                       lower_32_bits(4*dw_count), NULL);
@@ -5186,8 +5627,6 @@ i40e_status i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
 
        cmd_resp->address = CPU_TO_LE32(addr);
        cmd_resp->length = CPU_TO_LE32(dw_count);
 
        cmd_resp->address = CPU_TO_LE32(addr);
        cmd_resp->length = CPU_TO_LE32(dw_count);
-       cmd_resp->addr_high = CPU_TO_LE32(upper_32_bits((u64)buffer));
-       cmd_resp->addr_low = CPU_TO_LE32(lower_32_bits((u64)buffer));
 
        status = i40e_asq_send_command(hw, &desc, buffer,
                                       lower_32_bits(4*dw_count), NULL);
 
        status = i40e_asq_send_command(hw, &desc, buffer,
                                       lower_32_bits(4*dw_count), NULL);
@@ -5472,3 +5911,609 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
 
        return status;
 }
 
        return status;
 }
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+i40e_status i40e_read_phy_register(struct i40e_hw *hw,
+                                            u8 page, u16 reg, u8 phy_addr,
+                                            u16 *value)
+{
+       i40e_status status = I40E_ERR_TIMEOUT;
+       u32 command  = 0;
+       u16 retry = 1000;
+       u8 port_num = (u8)hw->func_caps.mdio_port_num;
+
+       command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+                 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+                 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+                 (I40E_MDIO_OPCODE_ADDRESS) |
+                 (I40E_MDIO_STCODE) |
+                 (I40E_GLGEN_MSCA_MDICMD_MASK) |
+                 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+       wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+       do {
+               command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+               if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+                       status = I40E_SUCCESS;
+                       break;
+               }
+               udelay(10);
+               retry--;
+       } while (retry);
+
+       if (status) {
+               i40e_debug(hw, I40E_DEBUG_PHY,
+                          "PHY: Can't write command to external PHY.\n");
+               goto phy_read_end;
+       }
+
+       command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+                 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+                 (I40E_MDIO_OPCODE_READ) |
+                 (I40E_MDIO_STCODE) |
+                 (I40E_GLGEN_MSCA_MDICMD_MASK) |
+                 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+       status = I40E_ERR_TIMEOUT;
+       retry = 1000;
+       wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+       do {
+               command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+               if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+                       status = I40E_SUCCESS;
+                       break;
+               }
+               udelay(10);
+               retry--;
+       } while (retry);
+
+       if (!status) {
+               command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+               *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+                        I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+       } else {
+               i40e_debug(hw, I40E_DEBUG_PHY,
+                          "PHY: Can't read register value from external PHY.\n");
+       }
+
+phy_read_end:
+       return status;
+}
+
+/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+                                             u8 page, u16 reg, u8 phy_addr,
+                                             u16 value)
+{
+       i40e_status status = I40E_ERR_TIMEOUT;
+       u32 command  = 0;
+       u16 retry = 1000;
+       u8 port_num = (u8)hw->func_caps.mdio_port_num;
+
+       command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+                 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+                 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+                 (I40E_MDIO_OPCODE_ADDRESS) |
+                 (I40E_MDIO_STCODE) |
+                 (I40E_GLGEN_MSCA_MDICMD_MASK) |
+                 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+       wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+       do {
+               command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+               if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+                       status = I40E_SUCCESS;
+                       break;
+               }
+               udelay(10);
+               retry--;
+       } while (retry);
+       if (status) {
+               i40e_debug(hw, I40E_DEBUG_PHY,
+                          "PHY: Can't write command to external PHY.\n");
+               goto phy_write_end;
+       }
+
+       command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+       wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+       command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+                 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+                 (I40E_MDIO_OPCODE_WRITE) |
+                 (I40E_MDIO_STCODE) |
+                 (I40E_GLGEN_MSCA_MDICMD_MASK) |
+                 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+       status = I40E_ERR_TIMEOUT;
+       retry = 1000;
+       wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+       do {
+               command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+               if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+                       status = I40E_SUCCESS;
+                       break;
+               }
+               udelay(10);
+               retry--;
+       } while (retry);
+
+phy_write_end:
+       return status;
+}
+
+/**
+ * i40e_get_phy_address
+ * @hw: pointer to the HW structure
+ * @dev_num: PHY port num that address we want
+ * @phy_addr: Returned PHY address
+ *
+ * Gets PHY address for current port
+ **/
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
+{
+       u8 port_num = (u8)hw->func_caps.mdio_port_num;
+       u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
+
+       return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
+}
+
+/**
+ * i40e_blink_phy_link_led
+ * @hw: pointer to the HW structure
+ * @time: how long the LED will blink, in seconds
+ * @interval: gap between LED on and off, in milliseconds
+ *
+ * Blinks the PHY link LED
+ **/
+i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+                                             u32 time, u32 interval)
+{
+       i40e_status status = I40E_SUCCESS;
+       u32 i;
+       u16 led_ctl = 0;
+       u16 gpio_led_port;
+       u16 led_reg;
+       u16 led_addr = I40E_PHY_LED_PROV_REG_1;
+       u8 phy_addr = 0;
+       u8 port_num;
+
+       i = rd32(hw, I40E_PFGEN_PORTNUM);
+       port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+       phy_addr = i40e_get_phy_address(hw, port_num);
+
+       for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+            led_addr++) {
+               status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+                                               led_addr, phy_addr, &led_reg);
+               if (status)
+                       goto phy_blinking_end;
+               led_ctl = led_reg;
+               if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+                       led_reg = 0;
+                       status = i40e_write_phy_register(hw,
+                                                        I40E_PHY_COM_REG_PAGE,
+                                                        led_addr, phy_addr,
+                                                        led_reg);
+                       if (status)
+                               goto phy_blinking_end;
+                       break;
+               }
+       }
+
+       if (time > 0 && interval > 0) {
+               for (i = 0; i < time * 1000; i += interval) {
+                       status = i40e_read_phy_register(hw,
+                                                       I40E_PHY_COM_REG_PAGE,
+                                                       led_addr, phy_addr,
+                                                       &led_reg);
+                       if (status)
+                               goto restore_config;
+                       if (led_reg & I40E_PHY_LED_MANUAL_ON)
+                               led_reg = 0;
+                       else
+                               led_reg = I40E_PHY_LED_MANUAL_ON;
+                       status = i40e_write_phy_register(hw,
+                                                        I40E_PHY_COM_REG_PAGE,
+                                                        led_addr, phy_addr,
+                                                        led_reg);
+                       if (status)
+                               goto restore_config;
+                       msleep(interval);
+               }
+       }
+
+restore_config:
+       status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+                                        phy_addr, led_ctl);
+
+phy_blinking_end:
+       return status;
+}
+
+/**
+ * i40e_led_get_phy - return current on/off mode
+ * @hw: pointer to the hw struct
+ * @led_addr: returned address of the LED register in use
+ * @val: returned original value of that register
+ *
+ **/
+i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+                                      u16 *val)
+{
+       i40e_status status = I40E_SUCCESS;
+       u16 gpio_led_port;
+       u8 phy_addr = 0;
+       u16 reg_val;
+       u16 temp_addr;
+       u8 port_num;
+       u32 i;
+
+       temp_addr = I40E_PHY_LED_PROV_REG_1;
+       i = rd32(hw, I40E_PFGEN_PORTNUM);
+       port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+       phy_addr = i40e_get_phy_address(hw, port_num);
+
+       for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+            temp_addr++) {
+               status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+                                               temp_addr, phy_addr, &reg_val);
+               if (status)
+                       return status;
+               *val = reg_val;
+               if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+                       *led_addr = temp_addr;
+                       break;
+               }
+       }
+       return status;
+}
+
+/**
+ * i40e_led_set_phy
+ * @hw: pointer to the HW structure
+ * @on: true to turn the LED on, false to turn it off
+ * @led_addr: address of the LED register to use
+ * @mode: original register value, plus a bit indicating whether to restore it
+ *
+ * Set LEDs on or off when controlled by the PHY
+ **/
+i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
+                                      u16 led_addr, u32 mode)
+{
+       i40e_status status = I40E_SUCCESS;
+       u16 led_ctl = 0;
+       u16 led_reg = 0;
+       u8 phy_addr = 0;
+       u8 port_num;
+       u32 i;
+
+       i = rd32(hw, I40E_PFGEN_PORTNUM);
+       port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+       phy_addr = i40e_get_phy_address(hw, port_num);
+
+       status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+                                       phy_addr, &led_reg);
+       if (status)
+               return status;
+       led_ctl = led_reg;
+       if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+               led_reg = 0;
+               status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+                                                led_addr, phy_addr, led_reg);
+               if (status)
+                       return status;
+       }
+       status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+                                       led_addr, phy_addr, &led_reg);
+       if (status)
+               goto restore_config;
+       if (on)
+               led_reg = I40E_PHY_LED_MANUAL_ON;
+       else
+               led_reg = 0;
+       status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+                                        led_addr, phy_addr, led_reg);
+       if (status)
+               goto restore_config;
+       if (mode & I40E_PHY_LED_MODE_ORIG) {
+               led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
+               status = i40e_write_phy_register(hw,
+                                                I40E_PHY_COM_REG_PAGE,
+                                                led_addr, phy_addr, led_ctl);
+       }
+       return status;
+restore_config:
+       status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+                                        phy_addr, led_ctl);
+       return status;
+}
+
+/**
+ * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: ptr to register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to read the Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+                               u32 reg_addr, u32 *reg_val,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
+               (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+       i40e_status status;
+
+       if (reg_val == NULL)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+
+       cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (status == I40E_SUCCESS)
+               *reg_val = LE32_TO_CPU(cmd_resp->value);
+
+       return status;
+}
+
+/**
+ * i40e_read_rx_ctl - read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ **/
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
+{
+       i40e_status status = I40E_SUCCESS;
+       bool use_register;
+       int retry = 5;
+       u32 val = 0;
+
+       use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+       if (!use_register) {
+do_retry:
+               status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
+               if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+                       usleep_range(1000, 2000);
+                       retry--;
+                       goto do_retry;
+               }
+       }
+
+       /* if the AQ access failed, try the old-fashioned way */
+       if (status || use_register)
+               val = rd32(hw, reg_addr);
+
+       return val;
+}
+
+/**
+ * i40e_aq_rx_ctl_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to write to an Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+                               u32 reg_addr, u32 reg_val,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_rx_ctl_reg_read_write *cmd =
+               (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+
+       cmd->address = CPU_TO_LE32(reg_addr);
+       cmd->value = CPU_TO_LE32(reg_val);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_write_rx_ctl - write to an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ **/
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+       i40e_status status = I40E_SUCCESS;
+       bool use_register;
+       int retry = 5;
+
+       use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+       if (!use_register) {
+do_retry:
+               status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
+                                                      reg_val, NULL);
+               if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+                       usleep_range(1000, 2000);
+                       retry--;
+                       goto do_retry;
+               }
+       }
+
+       /* if the AQ access failed, try the old-fashioned way */
+       if (status || use_register)
+               wr32(hw, reg_addr, reg_val);
+}
+
+/**
+ * i40e_aq_set_arp_proxy_config
+ * @hw: pointer to the HW structure
+ * @proxy_config: pointer to proxy config command table struct
+ * @cmd_details: pointer to command details
+ *
+ * Set ARP offload parameters from pre-populated
+ * i40e_aqc_arp_proxy_data struct
+ **/
+i40e_status i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+                               struct i40e_aqc_arp_proxy_data *proxy_config,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+
+       if (!proxy_config)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+
+       desc.params.external.addr_high =
+                                 CPU_TO_LE32(upper_32_bits((u64)proxy_config));
+       desc.params.external.addr_low =
+                                 CPU_TO_LE32(lower_32_bits((u64)proxy_config));
+
+       status = i40e_asq_send_command(hw, &desc, proxy_config,
+                                      sizeof(struct i40e_aqc_arp_proxy_data),
+                                      cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_ns_proxy_table_entry
+ * @hw: pointer to the HW structure
+ * @ns_proxy_table_entry: pointer to NS table entry command struct
+ * @cmd_details: pointer to command details
+ *
+ * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
+ * from pre-populated i40e_aqc_ns_proxy_data struct
+ **/
+i40e_status i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+                       struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+
+       if (!ns_proxy_table_entry)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                               i40e_aqc_opc_set_ns_proxy_table_entry);
+
+       desc.params.external.addr_high =
+               CPU_TO_LE32(upper_32_bits((u64)ns_proxy_table_entry));
+       desc.params.external.addr_low =
+               CPU_TO_LE32(lower_32_bits((u64)ns_proxy_table_entry));
+
+       status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
+                                      sizeof(struct i40e_aqc_ns_proxy_data),
+                                      cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_clear_wol_filter
+ * @hw: pointer to the hw struct
+ * @filter_index: index of filter to modify (0-7)
+ * @filter: buffer containing filter to be set
+ * @set_filter: true to set filter, false to clear filter
+ * @no_wol_tco: if true, pass through packets cannot cause wake-up
+ *             if false, pass through packets may cause wake-up
+ * @filter_valid: true if filter action is valid
+ * @no_wol_tco_valid: true if no WoL in TCO traffic action valid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear WoL filter for port attached to the PF
+ **/
+i40e_status i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+                               u8 filter_index,
+                               struct i40e_aqc_set_wol_filter_data *filter,
+                               bool set_filter, bool no_wol_tco,
+                               bool filter_valid, bool no_wol_tco_valid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_wol_filter *cmd =
+               (struct i40e_aqc_set_wol_filter *)&desc.params.raw;
+       i40e_status status;
+       u16 cmd_flags = 0;
+       u16 valid_flags = 0;
+       u16 buff_len = 0;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter);
+
+       if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS)
+               return  I40E_ERR_PARAM;
+       cmd->filter_index = CPU_TO_LE16(filter_index);
+
+       if (set_filter) {
+               if (!filter)
+                       return  I40E_ERR_PARAM;
+               cmd_flags |= I40E_AQC_SET_WOL_FILTER;
+               buff_len = sizeof(*filter);
+       }
+       if (no_wol_tco)
+               cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
+       cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+       if (filter_valid)
+               valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID;
+       if (no_wol_tco_valid)
+               valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
+       cmd->valid_flags = CPU_TO_LE16(valid_flags);
+
+       cmd->address_high = CPU_TO_LE32(upper_32_bits((u64)filter));
+       cmd->address_low = CPU_TO_LE32(lower_32_bits((u64)filter));
+
+       status = i40e_asq_send_command(hw, &desc, filter,
+                                      buff_len, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_wake_event_reason
+ * @hw: pointer to the hw struct
+ * @wake_reason: return value, index of matching filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get information for the reason of a Wake Up event
+ **/
+i40e_status i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+                               u16 *wake_reason,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_wake_reason_completion *resp =
+               (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (status == I40E_SUCCESS)
+               *wake_reason = LE16_TO_CPU(resp->wake_reason);
+
+       return status;
+}
+
similarity index 97%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_dcb.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_dcb.c
index ac0053ff49efb6715fe29397cf654796a8492543..5ce8449148952e92b97df293296f893daa706f42 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -380,32 +377,40 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
 {
        u16 length, typelength, offset = 0;
        struct i40e_cee_app_prio *app;
 {
        u16 length, typelength, offset = 0;
        struct i40e_cee_app_prio *app;
-       u8 i, up, selector;
+       u8 i;
 
        typelength = ntohs(tlv->hdr.typelen);
        length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
                       I40E_LLDP_TLV_LEN_SHIFT);
 
 
        typelength = ntohs(tlv->hdr.typelen);
        length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
                       I40E_LLDP_TLV_LEN_SHIFT);
 
-       dcbcfg->numapps = length/sizeof(*app);
+       dcbcfg->numapps = length / sizeof(*app);
        if (!dcbcfg->numapps)
                return;
 
        for (i = 0; i < dcbcfg->numapps; i++) {
        if (!dcbcfg->numapps)
                return;
 
        for (i = 0; i < dcbcfg->numapps; i++) {
+               u8 up, selector;
+
                app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
                for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
                        if (app->prio_map & BIT(up))
                                break;
                }
                dcbcfg->app[i].priority = up;
                app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
                for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
                        if (app->prio_map & BIT(up))
                                break;
                }
                dcbcfg->app[i].priority = up;
+
                /* Get Selector from lower 2 bits, and convert to IEEE */
                selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
                /* Get Selector from lower 2 bits, and convert to IEEE */
                selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
-               if (selector == I40E_CEE_APP_SEL_ETHTYPE)
+               switch (selector) {
+               case I40E_CEE_APP_SEL_ETHTYPE:
                        dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
                        dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
-               else if (selector == I40E_CEE_APP_SEL_TCPIP)
+                       break;
+               case I40E_CEE_APP_SEL_TCPIP:
                        dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
                        dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
-               else
+                       break;
+               default:
                        /* Keep selector as it is for unknown types */
                        dcbcfg->app[i].selector = selector;
                        /* Keep selector as it is for unknown types */
                        dcbcfg->app[i].selector = selector;
+               }
+
                dcbcfg->app[i].protocolid = ntohs(app->protocol);
                /* Move to next app */
                offset += sizeof(*app);
                dcbcfg->app[i].protocolid = ntohs(app->protocol);
                /* Move to next app */
                offset += sizeof(*app);
@@ -815,13 +820,15 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
        struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
        struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
 
        struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
        struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
 
-       /* If Firmware version < v4.33 IEEE only */
-       if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
-           (hw->aq.fw_maj_ver < 4))
+       /* If Firmware version < v4.33 on X710/XL710, IEEE only */
+       if ((hw->mac.type == I40E_MAC_XL710) &&
+           (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+             (hw->aq.fw_maj_ver < 4)))
                return i40e_get_ieee_dcb_config(hw);
 
                return i40e_get_ieee_dcb_config(hw);
 
-       /* If Firmware version == v4.33 use old CEE struct */
-       if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
+       /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
+       if ((hw->mac.type == I40E_MAC_XL710) &&
+           ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
                ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
                                                 sizeof(cee_v1_cfg), NULL);
                if (ret == I40E_SUCCESS) {
                ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
                                                 sizeof(cee_v1_cfg), NULL);
                if (ret == I40E_SUCCESS) {
@@ -1233,14 +1240,12 @@ i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
        u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
        i40e_status ret = I40E_SUCCESS;
        struct i40e_lldp_org_tlv *tlv;
        u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
        i40e_status ret = I40E_SUCCESS;
        struct i40e_lldp_org_tlv *tlv;
-       u16 type, typelength;
+       u16 typelength;
 
        tlv = (struct i40e_lldp_org_tlv *)lldpmib;
        while (1) {
                i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
                typelength = ntohs(tlv->typelength);
 
        tlv = (struct i40e_lldp_org_tlv *)lldpmib;
        while (1) {
                i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
                typelength = ntohs(tlv->typelength);
-               type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
-                               I40E_LLDP_TLV_TYPE_SHIFT);
                length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
                                I40E_LLDP_TLV_LEN_SHIFT);
                if (length)
                length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
                                I40E_LLDP_TLV_LEN_SHIFT);
                if (length)
similarity index 96%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_dcb.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_dcb.h
index 0c000c964985c361040a0074cbb6516dddf61bef..ab8ea65340723bfdb440ed0da3a4bb3b676cc13a 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
similarity index 97%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_dcb_nl.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_dcb_nl.c
index b5d6f6cf8b7990fa217736a593f4d86f03abcfd6..f77a706c9af794d7eb7a63edb96b35fb6e88365f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -199,8 +196,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
                }
        }
 
                }
        }
 
+#ifdef HAVE_DCBNL_IEEE_DELAPP
        /* Notify user-space of the changes */
        dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
        /* Notify user-space of the changes */
        dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+#endif
 }
 
 /**
 }
 
 /**
similarity index 87%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_debugfs.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_debugfs.c
index 54b5d054044266e2ea515e78379ed4d46af1432b..e713569c91056e514bf16906834e3645bd8d216c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -61,256 +58,12 @@ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
 {
        int i;
 
 {
        int i;
 
-       if ((seid < I40E_BASE_VEB_SEID) ||
-           (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB)))
-               dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
-       else
-               for (i = 0; i < I40E_MAX_VEB; i++)
-                       if (pf->veb[i] && pf->veb[i]->seid == seid)
-                               return pf->veb[i];
+       for (i = 0; i < I40E_MAX_VEB; i++)
+               if (pf->veb[i] && pf->veb[i]->seid == seid)
+                       return pf->veb[i];
        return NULL;
 }
 
        return NULL;
 }
 
-/**************************************************************
- * dump
- * The dump entry in debugfs is for getting a data snapshow of
- * the driver's current configuration and runtime details.
- * When the filesystem entry is written, a snapshot is taken.
- * When the entry is read, the most recent snapshot data is dumped.
- **************************************************************/
-static char *i40e_dbg_dump_buf;
-static ssize_t i40e_dbg_dump_data_len;
-static ssize_t i40e_dbg_dump_buffer_len;
-
-/**
- * i40e_dbg_dump_read - read the dump data
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
-                                 size_t count, loff_t *ppos)
-{
-       int bytes_not_copied;
-       int len;
-
-       /* is *ppos bigger than the available data? */
-       if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf)
-               return 0;
-
-       /* be sure to not read beyond the end of available data */
-       len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos));
-
-       bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len);
-       if (bytes_not_copied < 0)
-               return bytes_not_copied;
-
-       *ppos += len;
-       return len;
-}
-
-/**
- * i40e_dbg_prep_dump_buf
- * @pf: the PF we're working with
- * @buflen: the desired buffer length
- *
- * Return positive if success, 0 if failed
- **/
-static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen)
-{
-       /* if not already big enough, prep for re alloc */
-       if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) {
-               kfree(i40e_dbg_dump_buf);
-               i40e_dbg_dump_buffer_len = 0;
-               i40e_dbg_dump_buf = NULL;
-       }
-
-       /* get a new buffer if needed */
-       if (!i40e_dbg_dump_buf) {
-               i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL);
-               if (i40e_dbg_dump_buf != NULL)
-                       i40e_dbg_dump_buffer_len = buflen;
-       }
-
-       return i40e_dbg_dump_buffer_len;
-}
-
-/**
- * i40e_dbg_dump_write - trigger a datadump snapshot
- * @filp: the opened file
- * @buffer: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- *
- * Any write clears the stats
- **/
-static ssize_t i40e_dbg_dump_write(struct file *filp,
-                                  const char __user *buffer,
-                                  size_t count, loff_t *ppos)
-{
-       struct i40e_pf *pf = filp->private_data;
-       bool seid_found = false;
-       long seid = -1;
-       int buflen = 0;
-       int i, ret;
-       int len;
-       u8 *p;
-
-       /* don't allow partial writes */
-       if (*ppos != 0)
-               return 0;
-
-       /* decode the SEID given to be dumped */
-       ret = kstrtol_from_user(buffer, count, 0, &seid);
-
-       if (ret) {
-               dev_info(&pf->pdev->dev, "bad seid value\n");
-       } else if (seid == 0) {
-               seid_found = true;
-
-               kfree(i40e_dbg_dump_buf);
-               i40e_dbg_dump_buffer_len = 0;
-               i40e_dbg_dump_data_len = 0;
-               i40e_dbg_dump_buf = NULL;
-               dev_info(&pf->pdev->dev, "debug buffer freed\n");
-
-       } else if (seid == pf->pf_seid || seid == 1) {
-               seid_found = true;
-
-               buflen = sizeof(struct i40e_pf);
-               buflen += (sizeof(struct i40e_aq_desc)
-                    * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries));
-
-               if (i40e_dbg_prep_dump_buf(pf, buflen)) {
-                       p = i40e_dbg_dump_buf;
-
-                       len = sizeof(struct i40e_pf);
-                       memcpy(p, pf, len);
-                       p += len;
-
-                       len = (sizeof(struct i40e_aq_desc)
-                                       * pf->hw.aq.num_asq_entries);
-                       memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
-                       p += len;
-
-                       len = (sizeof(struct i40e_aq_desc)
-                                       * pf->hw.aq.num_arq_entries);
-                       memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
-                       p += len;
-
-                       i40e_dbg_dump_data_len = buflen;
-                       dev_info(&pf->pdev->dev,
-                                "PF seid %ld dumped %d bytes\n",
-                                seid, (int)i40e_dbg_dump_data_len);
-               }
-       } else if (seid >= I40E_BASE_VSI_SEID) {
-               struct i40e_vsi *vsi = NULL;
-               struct i40e_mac_filter *f;
-               int filter_count = 0;
-
-               mutex_lock(&pf->switch_mutex);
-               vsi = i40e_dbg_find_vsi(pf, seid);
-               if (!vsi) {
-                       mutex_unlock(&pf->switch_mutex);
-                       goto write_exit;
-               }
-
-               buflen = sizeof(struct i40e_vsi);
-               buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors;
-               buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs;
-               buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs;
-               buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs;
-               list_for_each_entry(f, &vsi->mac_filter_list, list)
-                       filter_count++;
-               buflen += sizeof(struct i40e_mac_filter) * filter_count;
-
-               if (i40e_dbg_prep_dump_buf(pf, buflen)) {
-                       p = i40e_dbg_dump_buf;
-                       seid_found = true;
-
-                       len = sizeof(struct i40e_vsi);
-                       memcpy(p, vsi, len);
-                       p += len;
-
-                       if (vsi->num_q_vectors) {
-                               len = (sizeof(struct i40e_q_vector)
-                                       * vsi->num_q_vectors);
-                               memcpy(p, vsi->q_vectors, len);
-                               p += len;
-                       }
-
-                       if (vsi->num_queue_pairs) {
-                               len = (sizeof(struct i40e_ring) *
-                                     vsi->num_queue_pairs);
-                               memcpy(p, vsi->tx_rings, len);
-                               p += len;
-                               memcpy(p, vsi->rx_rings, len);
-                               p += len;
-                       }
-
-                       if (vsi->tx_rings[0]) {
-                               len = sizeof(struct i40e_tx_buffer);
-                               for (i = 0; i < vsi->num_queue_pairs; i++) {
-                                       memcpy(p, vsi->tx_rings[i]->tx_bi, len);
-                                       p += len;
-                               }
-                               len = sizeof(struct i40e_rx_buffer);
-                               for (i = 0; i < vsi->num_queue_pairs; i++) {
-                                       memcpy(p, vsi->rx_rings[i]->rx_bi, len);
-                                       p += len;
-                               }
-                       }
-
-                       /* macvlan filter list */
-                       len = sizeof(struct i40e_mac_filter);
-                       list_for_each_entry(f, &vsi->mac_filter_list, list) {
-                               memcpy(p, f, len);
-                               p += len;
-                       }
-
-                       i40e_dbg_dump_data_len = buflen;
-                       dev_info(&pf->pdev->dev,
-                                "VSI seid %ld dumped %d bytes\n",
-                                seid, (int)i40e_dbg_dump_data_len);
-               }
-               mutex_unlock(&pf->switch_mutex);
-       } else if (seid >= I40E_BASE_VEB_SEID) {
-               struct i40e_veb *veb = NULL;
-
-               mutex_lock(&pf->switch_mutex);
-               veb = i40e_dbg_find_veb(pf, seid);
-               if (!veb) {
-                       mutex_unlock(&pf->switch_mutex);
-                       goto write_exit;
-               }
-
-               buflen = sizeof(struct i40e_veb);
-               if (i40e_dbg_prep_dump_buf(pf, buflen)) {
-                       seid_found = true;
-                       memcpy(i40e_dbg_dump_buf, veb, buflen);
-                       i40e_dbg_dump_data_len = buflen;
-                       dev_info(&pf->pdev->dev,
-                                "VEB seid %ld dumped %d bytes\n",
-                                seid, (int)i40e_dbg_dump_data_len);
-               }
-               mutex_unlock(&pf->switch_mutex);
-       }
-
-write_exit:
-       if (!seid_found)
-               dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid);
-
-       return count;
-}
-
-static const struct file_operations i40e_dbg_dump_fops = {
-       .owner = THIS_MODULE,
-       .open =  simple_open,
-       .read =  i40e_dbg_dump_read,
-       .write = i40e_dbg_dump_write,
-};
-
 /**************************************************************
  * command
  * The command entry in debugfs is for giving the driver commands
 /**************************************************************
  * command
  * The command entry in debugfs is for giving the driver commands
@@ -332,7 +85,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
 {
        struct i40e_pf *pf = filp->private_data;
        int bytes_not_copied;
 {
        struct i40e_pf *pf = filp->private_data;
        int bytes_not_copied;
-       int buf_size = 256;
+       size_t buf_size = 256;
        char *buf;
        int len;
 
        char *buf;
        int len;
 
@@ -353,8 +106,8 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
        bytes_not_copied = copy_to_user(buffer, buf, len);
        kfree(buf);
 
        bytes_not_copied = copy_to_user(buffer, buf, len);
        kfree(buf);
 
-       if (bytes_not_copied < 0)
-               return bytes_not_copied;
+       if (bytes_not_copied)
+               return -EFAULT;
 
        *ppos = len;
        return len;
 
        *ppos = len;
        return len;
@@ -383,25 +136,44 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                return;
        }
        dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
                return;
        }
        dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
-       if (vsi->netdev)
-               dev_info(&pf->pdev->dev,
-                        "    netdev: name = %s\n",
-                        vsi->netdev->name);
+       if (vsi->netdev) {
+               struct net_device *nd = vsi->netdev;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+               u32 hw_features;
+#endif
+
+               dev_info(&pf->pdev->dev, "    netdev: name = %s, state = %lu, flags = 0x%08x\n",
+                        nd->name, nd->state, nd->flags);
+               dev_info(&pf->pdev->dev, "        features      = 0x%08lx\n",
+                        (unsigned long int)nd->features);
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+               hw_features = get_netdev_hw_features(vsi->netdev);
+               dev_info(&pf->pdev->dev, "        hw_features   = 0x%08x\n",
+                        hw_features);
+#else
+               dev_info(&pf->pdev->dev, "        hw_features   = 0x%08lx\n",
+                        (unsigned long int)nd->hw_features);
+#endif
+#endif
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+               dev_info(&pf->pdev->dev, "        vlan_features = 0x%08lx\n",
+                        (unsigned long int)nd->vlan_features);
+#endif
+       }
 #ifdef HAVE_VLAN_RX_REGISTER
        if (vsi->vlgrp)
                dev_info(&pf->pdev->dev,
                         "    vlgrp: & = %p\n", vsi->vlgrp);
 #else
 #ifdef HAVE_VLAN_RX_REGISTER
        if (vsi->vlgrp)
                dev_info(&pf->pdev->dev,
                         "    vlgrp: & = %p\n", vsi->vlgrp);
 #else
-       if (vsi->active_vlans)
-               dev_info(&pf->pdev->dev,
-                        "    vlgrp: & = %p\n", vsi->active_vlans);
+       dev_info(&pf->pdev->dev, "    vlgrp: & = %p\n", vsi->active_vlans);
 #endif /* HAVE_VLAN_RX_REGISTER */
        dev_info(&pf->pdev->dev,
 #endif /* HAVE_VLAN_RX_REGISTER */
        dev_info(&pf->pdev->dev,
-                "    netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n",
-                vsi->netdev_registered,
-                vsi->current_netdev_flags, vsi->state, vsi->flags);
+                "    state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
+                vsi->state, vsi->flags,
+                vsi->netdev_registered, vsi->current_netdev_flags);
        if (vsi == pf->vsi[pf->lan_vsi])
        if (vsi == pf->vsi[pf->lan_vsi])
-               dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+               dev_info(&pf->pdev->dev, "    MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
                         pf->hw.mac.addr,
                         pf->hw.mac.san_addr,
                         pf->hw.mac.port_addr);
                         pf->hw.mac.addr,
                         pf->hw.mac.san_addr,
                         pf->hw.mac.port_addr);
@@ -515,13 +287,13 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                         rx_ring->queue_index,
                         rx_ring->reg_idx);
                dev_info(&pf->pdev->dev,
                         rx_ring->queue_index,
                         rx_ring->reg_idx);
                dev_info(&pf->pdev->dev,
-                        "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
-                        i, rx_ring->rx_hdr_len,
+                        "    rx_rings[%i]: rx_buf_len = %d, dtype = %d\n",
+                        i,
                         rx_ring->rx_buf_len,
                         rx_ring->rx_buf_len,
-                        rx_ring->dtype);
+                        0);
                dev_info(&pf->pdev->dev,
                dev_info(&pf->pdev->dev,
-                        "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-                        i, rx_ring->hsplit,
+                        "    rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+                        i,
                         rx_ring->next_to_use,
                         rx_ring->next_to_clean,
                         rx_ring->ring_active);
                         rx_ring->next_to_use,
                         rx_ring->next_to_clean,
                         rx_ring->ring_active);
@@ -535,6 +307,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                         i,
                         rx_ring->rx_stats.alloc_page_failed,
                         rx_ring->rx_stats.alloc_buff_failed);
                         i,
                         rx_ring->rx_stats.alloc_page_failed,
                         rx_ring->rx_stats.alloc_buff_failed);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n",
+                        i,
+                        rx_ring->rx_stats.realloc_count,
+                        rx_ring->rx_stats.page_reuse_count);
                dev_info(&pf->pdev->dev,
                         "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
                         i, rx_ring->size,
                dev_info(&pf->pdev->dev,
                         "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
                         i, rx_ring->size,
@@ -565,10 +342,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                         tx_ring->reg_idx);
                dev_info(&pf->pdev->dev,
                         "    tx_rings[%i]: dtype = %d\n",
                         tx_ring->reg_idx);
                dev_info(&pf->pdev->dev,
                         "    tx_rings[%i]: dtype = %d\n",
-                        i, tx_ring->dtype);
+                        i, 0);
                dev_info(&pf->pdev->dev,
                dev_info(&pf->pdev->dev,
-                        "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-                        i, tx_ring->hsplit,
+                        "    tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+                        i,
                         tx_ring->next_to_use,
                         tx_ring->next_to_clean,
                         tx_ring->ring_active);
                         tx_ring->next_to_use,
                         tx_ring->next_to_clean,
                         tx_ring->ring_active);
@@ -602,8 +379,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                 vsi->tx_itr_setting,
                 ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed");
        dev_info(&pf->pdev->dev,
                 vsi->tx_itr_setting,
                 ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed");
        dev_info(&pf->pdev->dev,
-                "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
-                vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+                "    max_frame = %d, rx_buf_len = %d dtype = %d\n",
+                vsi->max_frame, vsi->rx_buf_len, 0);
        dev_info(&pf->pdev->dev,
                 "    num_q_vectors = %i, base_vector = %i\n",
                 vsi->num_q_vectors, vsi->base_vector);
        dev_info(&pf->pdev->dev,
                 "    num_q_vectors = %i, base_vector = %i\n",
                 vsi->num_q_vectors, vsi->base_vector);
@@ -831,20 +608,13 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                        if (!is_rx_ring) {
                                txd = I40E_TX_DESC(&ring, i);
                                dev_info(&pf->pdev->dev,
                        if (!is_rx_ring) {
                                txd = I40E_TX_DESC(&ring, i);
                                dev_info(&pf->pdev->dev,
-                                        "   d[%03i] = 0x%016llx 0x%016llx\n",
+                                        "   d[%03x] = 0x%016llx 0x%016llx\n",
                                         i, txd->buffer_addr,
                                         txd->cmd_type_offset_bsz);
                                         i, txd->buffer_addr,
                                         txd->cmd_type_offset_bsz);
-                       } else if (sizeof(union i40e_rx_desc) ==
-                                  sizeof(union i40e_16byte_rx_desc)) {
-                               rxd = I40E_RX_DESC(&ring, i);
-                               dev_info(&pf->pdev->dev,
-                                        "   d[%03i] = 0x%016llx 0x%016llx\n",
-                                        i, rxd->read.pkt_addr,
-                                        rxd->read.hdr_addr);
                        } else {
                                rxd = I40E_RX_DESC(&ring, i);
                                dev_info(&pf->pdev->dev,
                        } else {
                                rxd = I40E_RX_DESC(&ring, i);
                                dev_info(&pf->pdev->dev,
-                                        "   d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+                                        "   d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
                                         i, rxd->read.pkt_addr,
                                         rxd->read.hdr_addr,
                                         rxd->read.rsvd1, rxd->read.rsvd2);
                                         i, rxd->read.pkt_addr,
                                         rxd->read.hdr_addr,
                                         rxd->read.rsvd1, rxd->read.rsvd2);
@@ -859,20 +629,13 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                if (!is_rx_ring) {
                        txd = I40E_TX_DESC(&ring, desc_n);
                        dev_info(&pf->pdev->dev,
                if (!is_rx_ring) {
                        txd = I40E_TX_DESC(&ring, desc_n);
                        dev_info(&pf->pdev->dev,
-                                "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+                                "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
                                 vsi_seid, ring_id, desc_n,
                                 txd->buffer_addr, txd->cmd_type_offset_bsz);
                                 vsi_seid, ring_id, desc_n,
                                 txd->buffer_addr, txd->cmd_type_offset_bsz);
-               } else if (sizeof(union i40e_rx_desc) ==
-                          sizeof(union i40e_16byte_rx_desc)) {
-                       rxd = I40E_RX_DESC(&ring, desc_n);
-                       dev_info(&pf->pdev->dev,
-                                "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
-                                vsi_seid, ring_id, desc_n,
-                                rxd->read.pkt_addr, rxd->read.hdr_addr);
                } else {
                        rxd = I40E_RX_DESC(&ring, desc_n);
                        dev_info(&pf->pdev->dev,
                } else {
                        rxd = I40E_RX_DESC(&ring, desc_n);
                        dev_info(&pf->pdev->dev,
-                                "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+                                "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
                                 vsi_seid, ring_id, desc_n,
                                 rxd->read.pkt_addr, rxd->read.hdr_addr,
                                 rxd->read.rsvd1, rxd->read.rsvd2);
                                 vsi_seid, ring_id, desc_n,
                                 rxd->read.pkt_addr, rxd->read.hdr_addr,
                                 rxd->read.rsvd1, rxd->read.rsvd2);
@@ -896,6 +659,112 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
                                 i, pf->vsi[i]->seid);
 }
 
                                 i, pf->vsi[i]->seid);
 }
 
+/**
+ * i40e_dbg_dump_resources - handles dump resources request
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_resources(struct i40e_pf *pf)
+{
+       struct i40e_aqc_switch_resource_alloc_element_resp *buf;
+       int buf_len;
+       u16 count = 32;
+       u8 num_entries;
+       int ret, i;
+
+       buf_len = count * sizeof(*buf);
+       buf = kzalloc(buf_len, GFP_KERNEL);
+       if (!buf) {
+               dev_err(&pf->pdev->dev, "Can't get memory\n");
+               return;
+       }
+
+       ret = i40e_aq_get_switch_resource_alloc(&pf->hw, &num_entries,
+                                               buf, count, NULL);
+       if (ret) {
+               dev_err(&pf->pdev->dev,
+                       "fail to get resources, err %s aq_err %s\n",
+                       i40e_stat_str(&pf->hw, ret),
+                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               kfree(buf);
+               return;
+       }
+
+       dev_info(&pf->pdev->dev, "  resources:\n");
+       dev_info(&pf->pdev->dev, "  guar  total  used unalloc   name\n");
+       for (i = 0; i < num_entries; i++) {
+               char *p;
+
+               switch (buf[i].resource_type) {
+               case I40E_AQ_RESOURCE_TYPE_VEB:
+                       p = "vebs";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_VSI:
+                       p = "vsis";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_MACADDR:
+                       p = "macaddrs";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_STAG:
+                       p = "stags";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_ETAG:
+                       p = "etags";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
+                       p = "multicast hash";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_UNICAST_HASH:
+                       p = "unicast hash";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_VLAN:
+                       p = "vlans";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY:
+                       p = "vsi list entries";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY:
+                       p = "etag list entries";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL:
+                       p = "vlan stat pools";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_MIRROR_RULE:
+                       p = "mirror rules";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_QUEUE_SETS:
+                       p = "queue sets";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS:
+                       p = "vlan filters";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS:
+                       p = "inner mac filters";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_IP_FILTERS:
+                       p = "ip filters";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS:
+                       p = "gre vn keys";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_VN2_KEYS:
+                       p = "vn2 keys";
+                       break;
+               case I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS:
+                       p = "tunnel ports";
+                       break;
+               default:
+                       p = "unknown";
+                       break;
+               }
+
+               dev_info(&pf->pdev->dev, "  %4d   %4d  %4d  %4d   %s\n",
+                        buf[i].guaranteed, buf[i].total, buf[i].used,
+                        buf[i].total_unalloced, p);
+       }
+
+       kfree(buf);
+}
+
 /**
  * i40e_dbg_dump_capabilities - handles dump capabilities request
  * @pf: the i40e_pf created in command write
 /**
  * i40e_dbg_dump_capabilities - handles dump capabilities request
  * @pf: the i40e_pf created in command write
@@ -980,12 +849,6 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
        struct i40e_veb *veb;
        int i;
 
        struct i40e_veb *veb;
        int i;
 
-       if ((seid < I40E_BASE_VEB_SEID) ||
-           (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) {
-               dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
-               return;
-       }
-
        veb = i40e_dbg_find_veb(pf, seid);
        if (!veb) {
                dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
        veb = i40e_dbg_find_veb(pf, seid);
        if (!veb) {
                dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
@@ -1113,14 +976,61 @@ static void i40e_dbg_dump_dcb_cfg(struct i40e_pf *pf,
                 cfg->pfc.pfccap, cfg->pfc.pfcenable);
 
        dev_info(&pf->pdev->dev,
                 cfg->pfc.pfccap, cfg->pfc.pfcenable);
 
        dev_info(&pf->pdev->dev,
-                "%s app_table: num_apps=%d\n", prefix, cfg->numapps);
-       for (i = 0; i < cfg->numapps; i++) {
+                "%s app_table: num_apps=%d\n", prefix, (int)cfg->numapps);
+       for (i = 0; i < (int)cfg->numapps; i++) {
                dev_info(&pf->pdev->dev, "%s app_table: %d prio=%d selector=%d protocol=0x%x\n",
                         prefix, i, cfg->app[i].priority,
                         cfg->app[i].selector,
                         cfg->app[i].protocolid);
        }
 }
                dev_info(&pf->pdev->dev, "%s app_table: %d prio=%d selector=%d protocol=0x%x\n",
                         prefix, i, cfg->app[i].priority,
                         cfg->app[i].selector,
                         cfg->app[i].protocolid);
        }
 }
+
+/**
+ * i40e_dbg_dump_fdir_filter - Dump out flow director filter contents
+ * @pf: the corresponding PF
+ * @f: the flow director filter
+ **/
+static inline void i40e_dbg_dump_fdir_filter(struct i40e_pf *pf,
+                                            struct i40e_fdir_filter *f)
+{
+       dev_info(&pf->pdev->dev, "fdir filter %d:\n", f->fd_id);
+       dev_info(&pf->pdev->dev, "    flow_type=%d ip4_proto=%d\n",
+                f->flow_type, f->ip4_proto);
+       dev_info(&pf->pdev->dev, "    dst_ip[0]= %pi4  dst_port=%d\n",
+                f->dst_ip, f->dst_port);
+       dev_info(&pf->pdev->dev, "    src_ip[0]= %pi4  src_port=%d\n",
+                f->src_ip, f->src_port);
+       dev_info(&pf->pdev->dev, "    sctp_v_tag=%d q_index=%d flex_off=%d\n",
+                f->sctp_v_tag, f->q_index, f->flex_off);
+       dev_info(&pf->pdev->dev, "    pctype=%d dest_vsi=%d dest_ctl=%d\n",
+                f->pctype, f->dest_vsi, f->dest_ctl);
+       dev_info(&pf->pdev->dev, "    fd_status=%d cnt_index=%d\n",
+                f->fd_status, f->cnt_index);
+       if (i40e_is_flex_filter(f))
+               dev_info(&pf->pdev->dev, "    pattern(user-def N)=%u offset_in_payload(user-def m)=%u\n",
+                        be16_to_cpu(f->flex_bytes[3]),
+                        (u16)~(be16_to_cpu(f->flex_mask[3])));
+}
+
+/**
+ * i40e_dbg_dump_cloud_filter - Dump out cloud filter contents
+ * @pf: the corresponding PF
+ * @f: the flow director filter
+ **/
+static inline void i40e_dbg_dump_cloud_filter(struct i40e_pf *pf,
+                                             struct i40e_cloud_filter *f)
+{
+       dev_info(&pf->pdev->dev, "cloud filter %d:\n", f->id);
+       dev_info(&pf->pdev->dev, "    outer_mac[]=%pM  inner_mac=%pM\n",
+                f->outer_mac, f->inner_mac);
+       dev_info(&pf->pdev->dev, "    inner_vlan %d, inner_ip[0] %pi4\n",
+                be16_to_cpu(f->inner_vlan), f->inner_ip);
+       dev_info(&pf->pdev->dev, "    tenant_id=%d flags=0x%02x, tunnel_type=0x%02x\n",
+                f->tenant_id, f->flags, f->tunnel_type);
+       dev_info(&pf->pdev->dev, "    seid=%d queue_id=%d\n",
+                f->seid, f->queue_id);
+}
+
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
 /**
  * i40e_dbg_command_write - write into command datum
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
 /**
  * i40e_dbg_command_write - write into command datum
@@ -1150,12 +1060,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        if (!cmd_buf)
                return count;
        bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
        if (!cmd_buf)
                return count;
        bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
-       if (bytes_not_copied < 0) {
+       if (bytes_not_copied) {
                kfree(cmd_buf);
                kfree(cmd_buf);
-               return bytes_not_copied;
+               return -EFAULT;
        }
        }
-       if (bytes_not_copied > 0)
-               count -= bytes_not_copied;
        cmd_buf[count] = '\0';
 
        cmd_buf_tmp = strchr(cmd_buf, '\n');
        cmd_buf[count] = '\0';
 
        cmd_buf_tmp = strchr(cmd_buf, '\n');
@@ -1351,7 +1259,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                spin_lock_bh(&vsi->mac_filter_list_lock);
                f = i40e_add_filter(vsi, ma, vlan, false, false);
                spin_unlock_bh(&vsi->mac_filter_list_lock);
                spin_lock_bh(&vsi->mac_filter_list_lock);
                f = i40e_add_filter(vsi, ma, vlan, false, false);
                spin_unlock_bh(&vsi->mac_filter_list_lock);
-               ret = i40e_sync_vsi_filters(vsi, true);
+               ret = i40e_sync_vsi_filters(vsi);
                if (f && !ret)
                        dev_info(&pf->pdev->dev,
                                 "add macaddr: %pM vlan=%d added to VSI %d\n",
                if (f && !ret)
                        dev_info(&pf->pdev->dev,
                                 "add macaddr: %pM vlan=%d added to VSI %d\n",
@@ -1390,7 +1298,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                spin_lock_bh(&vsi->mac_filter_list_lock);
                i40e_del_filter(vsi, ma, vlan, false, false);
                spin_unlock_bh(&vsi->mac_filter_list_lock);
                spin_lock_bh(&vsi->mac_filter_list_lock);
                i40e_del_filter(vsi, ma, vlan, false, false);
                spin_unlock_bh(&vsi->mac_filter_list_lock);
-               ret = i40e_sync_vsi_filters(vsi, true);
+               ret = i40e_sync_vsi_filters(vsi);
                if (!ret)
                        dev_info(&pf->pdev->dev,
                                 "del macaddr: %pM vlan=%d removed from VSI %d\n",
                if (!ret)
                        dev_info(&pf->pdev->dev,
                                 "del macaddr: %pM vlan=%d removed from VSI %d\n",
@@ -1454,6 +1362,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        } else if (strncmp(cmd_buf, "dump", 4) == 0) {
                if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
                        i40e_fetch_switch_configuration(pf, true);
        } else if (strncmp(cmd_buf, "dump", 4) == 0) {
                if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
                        i40e_fetch_switch_configuration(pf, true);
+               } else if (strncmp(&cmd_buf[5], "resources", 9) == 0) {
+                       i40e_dbg_dump_resources(pf);
                } else if (strncmp(&cmd_buf[5], "capabilities", 7) == 0) {
                        i40e_dbg_dump_capabilities(pf);
                } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
                } else if (strncmp(&cmd_buf[5], "capabilities", 7) == 0) {
                        i40e_dbg_dump_capabilities(pf);
                } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
@@ -1609,17 +1519,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                       buff, rlen, true);
                        kfree(buff);
                        buff = NULL;
                                       buff, rlen, true);
                        kfree(buff);
                        buff = NULL;
+               } else if (strncmp(&cmd_buf[5], "filters", 7) == 0) {
+                       struct i40e_fdir_filter *f_rule;
+                       struct i40e_cloud_filter *c_rule;
+                       struct hlist_node *node2;
+
+                       hlist_for_each_entry_safe(f_rule, node2,
+                                                 &pf->fdir_filter_list,
+                                                 fdir_node) {
+                               i40e_dbg_dump_fdir_filter(pf, f_rule);
+                       }
+
+                       /* find the cloud filter rule ids */
+                       hlist_for_each_entry_safe(c_rule, node2,
+                                                 &pf->cloud_filter_list,
+                                                 cloud_node) {
+                               i40e_dbg_dump_cloud_filter(pf, c_rule);
+                       }
                } else {
                        dev_info(&pf->pdev->dev,
                                 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
                        dev_info(&pf->pdev->dev, "dump switch\n");
                        dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
                        dev_info(&pf->pdev->dev, "dump capabilities\n");
                } else {
                        dev_info(&pf->pdev->dev,
                                 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
                        dev_info(&pf->pdev->dev, "dump switch\n");
                        dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
                        dev_info(&pf->pdev->dev, "dump capabilities\n");
+                       dev_info(&pf->pdev->dev, "dump resources\n");
                        dev_info(&pf->pdev->dev, "dump reset stats\n");
                        dev_info(&pf->pdev->dev, "dump port\n");
                        dev_info(&pf->pdev->dev, "dump VF [vf_id]\n");
                        dev_info(&pf->pdev->dev,
                                 "dump debug fwdata <cluster_id> <table_id> <index>\n");
                        dev_info(&pf->pdev->dev, "dump reset stats\n");
                        dev_info(&pf->pdev->dev, "dump port\n");
                        dev_info(&pf->pdev->dev, "dump VF [vf_id]\n");
                        dev_info(&pf->pdev->dev,
                                 "dump debug fwdata <cluster_id> <table_id> <index>\n");
+                       dev_info(&pf->pdev->dev, "dump filters\n");
                }
 
        } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
                }
 
        } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
@@ -2149,7 +2078,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                rtnl_lock();
                i40e_reconfig_rss_queues(pf, q_count);
                rtnl_unlock();
                rtnl_lock();
                i40e_reconfig_rss_queues(pf, q_count);
                rtnl_unlock();
-               dev_info(&pf->pdev->dev, "new rss_size %d\n", pf->rss_size);
+               dev_info(&pf->pdev->dev, "new rss_size %d\n",
+                        pf->alloc_rss_size);
        } else if (strncmp(cmd_buf, "get bw", 6) == 0) {
                i40e_status status;
                u32 max_bw, min_bw;
        } else if (strncmp(cmd_buf, "get bw", 6) == 0) {
                i40e_status status;
                u32 max_bw, min_bw;
@@ -2270,6 +2200,109 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        dev_info(&pf->pdev->dev,
                                 "BW settings NOT SAVED - error %d:%d updating NVM\n",
                                 aq_status, last_aq_status);
                        dev_info(&pf->pdev->dev,
                                 "BW settings NOT SAVED - error %d:%d updating NVM\n",
                                 aq_status, last_aq_status);
+       } else if (strncmp(cmd_buf, "add switch ingress mirror", 25) == 0) {
+               u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
+               u16 switch_seid, dst_vsi_seid, rule_id;
+               i40e_status aq_status;
+
+               cnt = sscanf(&cmd_buf[25], "%hu %hu",
+                            &switch_seid, &dst_vsi_seid);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev,
+                                "add mirror: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               }
+
+               aq_status =
+                       i40e_aq_add_mirrorrule(&pf->hw,
+                                              switch_seid, rule_type,
+                                              dst_vsi_seid, 0, NULL, NULL,
+                                              &rule_id, NULL, NULL);
+               if (aq_status)
+                       dev_info(&pf->pdev->dev,
+                                "add ingress mirror failed with status %d\n",
+                                aq_status);
+               else
+                       dev_info(&pf->pdev->dev,
+                                "Ingress mirror rule %d added\n", rule_id);
+       } else if (strncmp(cmd_buf, "add switch egress mirror", 24) == 0) {
+               u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
+               u16 switch_seid, dst_vsi_seid, rule_id;
+               i40e_status aq_status;
+
+               cnt = sscanf(&cmd_buf[24], "%hu %hu",
+                            &switch_seid, &dst_vsi_seid);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev,
+                                "add mirror: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               }
+
+               aq_status =
+                       i40e_aq_add_mirrorrule(&pf->hw,
+                                              switch_seid, rule_type,
+                                              dst_vsi_seid, 0, NULL, NULL,
+                                              &rule_id, NULL, NULL);
+               if (aq_status)
+                       dev_info(&pf->pdev->dev,
+                                "add egress mirror failed with status %d\n",
+                                aq_status);
+               else
+                       dev_info(&pf->pdev->dev,
+                                "Egress mirror rule %d added\n", rule_id);
+       } else if (strncmp(cmd_buf, "del switch ingress mirror", 25) == 0) {
+               u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
+               i40e_status aq_status;
+               u16 switch_seid, rule_id;
+
+               cnt = sscanf(&cmd_buf[25], "%hu %hu",
+                            &switch_seid, &rule_id);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev,
+                                "del mirror: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               }
+
+               aq_status =
+                       i40e_aq_delete_mirrorrule(&pf->hw, switch_seid,
+                                                 rule_type, rule_id, 0, NULL,
+                                                 NULL, NULL, NULL);
+               if (aq_status)
+                       dev_info(&pf->pdev->dev,
+                                "mirror rule remove failed with status %d\n",
+                                aq_status);
+               else
+                       dev_info(&pf->pdev->dev,
+                                "Mirror rule %d removed\n", rule_id);
+       } else if (strncmp(cmd_buf, "del switch egress mirror", 24) == 0) {
+               u16 rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
+               i40e_status aq_status;
+               u16 switch_seid, rule_id;
+
+               cnt = sscanf(&cmd_buf[24], "%hu %hu",
+                            &switch_seid, &rule_id);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev,
+                                "del mirror: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               }
+
+               aq_status =
+                       i40e_aq_delete_mirrorrule(&pf->hw, switch_seid,
+                                                 rule_type, rule_id, 0, NULL,
+                                                 NULL, NULL, NULL);
+               if (aq_status)
+                       dev_info(&pf->pdev->dev,
+                                "mirror rule remove failed with status %d\n",
+                                aq_status);
+               else
+                       dev_info(&pf->pdev->dev,
+                                "Mirror rule %d removed\n", rule_id);
+
        } else {
                dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
                dev_info(&pf->pdev->dev, "available commands\n");
        } else {
                dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
                dev_info(&pf->pdev->dev, "available commands\n");
@@ -2284,6 +2317,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  dump switch\n");
                dev_info(&pf->pdev->dev, "  dump vsi [seid]\n");
                dev_info(&pf->pdev->dev, "  dump capabilities\n");
                dev_info(&pf->pdev->dev, "  dump switch\n");
                dev_info(&pf->pdev->dev, "  dump vsi [seid]\n");
                dev_info(&pf->pdev->dev, "  dump capabilities\n");
+               dev_info(&pf->pdev->dev, "  dump resources\n");
                dev_info(&pf->pdev->dev, "  dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
                dev_info(&pf->pdev->dev, "  dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
                dev_info(&pf->pdev->dev, "  dump desc aq\n");
                dev_info(&pf->pdev->dev, "  dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
                dev_info(&pf->pdev->dev, "  dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
                dev_info(&pf->pdev->dev, "  dump desc aq\n");
@@ -2319,6 +2353,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  get bw\n");
                dev_info(&pf->pdev->dev, "  set bw <MAX> <MIN>\n");
                dev_info(&pf->pdev->dev, "  commit bw\n");
                dev_info(&pf->pdev->dev, "  get bw\n");
                dev_info(&pf->pdev->dev, "  set bw <MAX> <MIN>\n");
                dev_info(&pf->pdev->dev, "  commit bw\n");
+               dev_info(&pf->pdev->dev, "  add switch ingress mirror <sw_seid> <dst_seid>\n");
+               dev_info(&pf->pdev->dev, "  add switch egress mirror <sw_seid> <dst_seid>\n");
+               dev_info(&pf->pdev->dev, "  del switch ingress mirror <sw_seid> <rule_id>\n");
+               dev_info(&pf->pdev->dev, "  del switch egress mirror <sw_seid> <rule_id>\n");
        }
 
 command_write_done:
        }
 
 command_write_done:
@@ -2353,7 +2391,7 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
 {
        struct i40e_pf *pf = filp->private_data;
        int bytes_not_copied;
 {
        struct i40e_pf *pf = filp->private_data;
        int bytes_not_copied;
-       int buf_size = 256;
+       size_t buf_size = 256;
        char *buf;
        int len;
 
        char *buf;
        int len;
 
@@ -2374,8 +2412,8 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
        bytes_not_copied = copy_to_user(buffer, buf, len);
        kfree(buf);
 
        bytes_not_copied = copy_to_user(buffer, buf, len);
        kfree(buf);
 
-       if (bytes_not_copied < 0)
-               return bytes_not_copied;
+       if (bytes_not_copied)
+               return -EFAULT;
 
        *ppos = len;
        return len;
 
        *ppos = len;
        return len;
@@ -2408,10 +2446,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
        memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
        bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
                                          buffer, count);
        memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
        bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
                                          buffer, count);
-       if (bytes_not_copied < 0)
-               return bytes_not_copied;
-       else if (bytes_not_copied > 0)
-               count -= bytes_not_copied;
+       if (bytes_not_copied)
+               return -EFAULT;
        i40e_dbg_netdev_ops_buf[count] = '\0';
 
        buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
        i40e_dbg_netdev_ops_buf[count] = '\0';
 
        buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
@@ -2566,11 +2602,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf)
        if (!pfile)
                goto create_failed;
 
        if (!pfile)
                goto create_failed;
 
-       pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
-                                   &i40e_dbg_dump_fops);
-       if (!pfile)
-               goto create_failed;
-
        pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
                                    &i40e_dbg_netdev_ops_fops);
        if (!pfile)
        pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
                                    &i40e_dbg_netdev_ops_fops);
        if (!pfile)
@@ -2591,9 +2622,6 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf)
 {
        debugfs_remove_recursive(pf->i40e_dbg_pf);
        pf->i40e_dbg_pf = NULL;
 {
        debugfs_remove_recursive(pf->i40e_dbg_pf);
        pf->i40e_dbg_pf = NULL;
-
-       kfree(i40e_dbg_dump_buf);
-       i40e_dbg_dump_buf = NULL;
 }
 
 /**
 }
 
 /**
similarity index 77%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_devids.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_devids.h
index eeaa4d89297171f5937a816c6582ba719b66eea9..23265c1fd9b455c2c27cc3d2a20b323ee41299f8 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -32,7 +29,6 @@
 /* Device IDs */
 #define I40E_DEV_ID_SFP_XL710          0x1572
 #define I40E_DEV_ID_QEMU               0x1574
 /* Device IDs */
 #define I40E_DEV_ID_SFP_XL710          0x1572
 #define I40E_DEV_ID_QEMU               0x1574
-#define I40E_DEV_ID_KX_A               0x157F
 #define I40E_DEV_ID_KX_B               0x1580
 #define I40E_DEV_ID_KX_C               0x1581
 #define I40E_DEV_ID_QSFP_A             0x1583
 #define I40E_DEV_ID_KX_B               0x1580
 #define I40E_DEV_ID_KX_C               0x1581
 #define I40E_DEV_ID_QSFP_A             0x1583
 #define I40E_DEV_ID_20G_KR2            0x1587
 #define I40E_DEV_ID_20G_KR2_A          0x1588
 #define I40E_DEV_ID_10G_BASE_T4                0x1589
 #define I40E_DEV_ID_20G_KR2            0x1587
 #define I40E_DEV_ID_20G_KR2_A          0x1588
 #define I40E_DEV_ID_10G_BASE_T4                0x1589
-#define I40E_DEV_ID_VF                 0x154C
-#define I40E_DEV_ID_VF_HV              0x1571
+#define I40E_DEV_ID_25G_B              0x158A
+#define I40E_DEV_ID_25G_SFP28          0x158B
+#define I40E_DEV_ID_KX_X722            0x37CE
+#define I40E_DEV_ID_QSFP_X722          0x37CF
+#define I40E_DEV_ID_SFP_X722           0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
+#define I40E_DEV_ID_SFP_I_X722         0x37D3
+#define I40E_DEV_ID_QSFP_I_X722                0x37D4
 
 #define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
                                         (d) == I40E_DEV_ID_QSFP_B  || \
 
 #define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
                                         (d) == I40E_DEV_ID_QSFP_B  || \
similarity index 95%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_diag.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_diag.c
index e9e65d7dc748d072ef3e2e0d544fcb2cf39e54f1..a3a1d211bc08cfa4955c36ed91c864c0a9537daf 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
similarity index 87%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_diag.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_diag.h
index 047543917e6b5840f7f3f3701feb4401bf2e1807..947a66f25ee320dfe09852a92bfecfc4d3734c4c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
similarity index 65%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_ethtool.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_ethtool.c
index db4565145c674dee7ed955206f213046d781e30a..1456ac589319479d8e5c0caae695c8118f961dea 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -99,6 +96,10 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
        I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
        I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
        I40E_VSI_STAT("tx_linearize", tx_linearize),
        I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
        I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
        I40E_VSI_STAT("tx_linearize", tx_linearize),
+       I40E_VSI_STAT("tx_force_wb", tx_force_wb),
+       I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt),
+       I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
+       I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
 };
 
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
 };
 
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
@@ -153,6 +154,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("rx_oversize", stats.rx_oversize),
        I40E_PF_STAT("rx_jabber", stats.rx_jabber),
        I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
        I40E_PF_STAT("rx_oversize", stats.rx_oversize),
        I40E_PF_STAT("rx_jabber", stats.rx_jabber),
        I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+       I40E_PF_STAT("arq_overflows", arq_overflows),
 #ifdef HAVE_PTP_1588_CLOCK
        I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
 #endif /* HAVE_PTP_1588_CLOCK */
 #ifdef HAVE_PTP_1588_CLOCK
        I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
 #endif /* HAVE_PTP_1588_CLOCK */
@@ -200,11 +202,11 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
 #endif /* I40E_FCOE */
 #define I40E_QUEUE_STATS_LEN(n) \
        (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
 #endif /* I40E_FCOE */
 #define I40E_QUEUE_STATS_LEN(n) \
        (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
-           * 2 /* tx and rx together */                                     \
+           * 2 /* Tx and Rx together */                                     \
            * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
 #define I40E_GLOBAL_STATS_LEN  ARRAY_SIZE(i40e_gstrings_stats)
 #define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
            * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
 #define I40E_GLOBAL_STATS_LEN  ARRAY_SIZE(i40e_gstrings_stats)
 #define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
-#define I40E_MISC_STATS_LEN   ARRAY_SIZE(i40e_gstrings_misc_stats)
+#define I40E_MISC_STATS_LEN    ARRAY_SIZE(i40e_gstrings_misc_stats)
 #ifdef I40E_FCOE
 #define I40E_FCOE_STATS_LEN    ARRAY_SIZE(i40e_gstrings_fcoe_stats)
 #define I40E_VSI_STATS_LEN(n)  (I40E_NETDEV_STATS_LEN + \
 #ifdef I40E_FCOE
 #define I40E_FCOE_STATS_LEN    ARRAY_SIZE(i40e_gstrings_fcoe_stats)
 #define I40E_VSI_STATS_LEN(n)  (I40E_NETDEV_STATS_LEN + \
@@ -229,7 +231,7 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
                 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
                 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
                 / sizeof(u64))
                 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
                 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
                 / sizeof(u64))
-#define I40E_VEB_STATS_LEN   ARRAY_SIZE(i40e_gstrings_veb_stats)
+#define I40E_VEB_STATS_LEN     ARRAY_SIZE(i40e_gstrings_veb_stats)
 #define I40E_VEB_STATS_TOTAL   (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
 #define I40E_PF_STATS_LEN(n)   (I40E_GLOBAL_STATS_LEN + \
                                 I40E_PFC_STATS_LEN + \
 #define I40E_VEB_STATS_TOTAL   (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
 #define I40E_PF_STATS_LEN(n)   (I40E_GLOBAL_STATS_LEN + \
                                 I40E_PFC_STATS_LEN + \
@@ -256,13 +258,27 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
 
 #endif /* ETHTOOL_TEST */
 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
 
 #endif /* ETHTOOL_TEST */
+static int i40e_del_cloud_filter_ethtool(struct i40e_pf *pf,
+                                        struct ethtool_rxnfc *cmd);
 
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = {
+       "MFP",
+       "LinkPolling",
+       "flow-director-atr",
+       "veb-stats",
+       "hw-atr-eviction",
+       "vf-true-promisc-support",
+};
+
+#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl)
+
 static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
        "MFP",
        "LinkPolling",
        "flow-director-atr",
        "veb-stats",
 static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
        "MFP",
        "LinkPolling",
        "flow-director-atr",
        "veb-stats",
+       "hw-atr-eviction",
 };
 
 #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
 };
 
 #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
@@ -289,7 +305,8 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
  **/
 static void i40e_get_settings_link_up(struct i40e_hw *hw,
                                      struct ethtool_cmd *ecmd,
  **/
 static void i40e_get_settings_link_up(struct i40e_hw *hw,
                                      struct ethtool_cmd *ecmd,
-                                     struct net_device *netdev)
+                                     struct net_device *netdev,
+                                     struct i40e_pf *pf)
 {
        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
        u32 link_speed = hw_link_info->link_speed;
 {
        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
        u32 link_speed = hw_link_info->link_speed;
@@ -338,6 +355,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               /* adding 100baseT support for 10GBASET_PHY */
+               if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) {
+                       ecmd->supported |= SUPPORTED_100baseT_Full;
+                       ecmd->advertising |= ADVERTISED_100baseT_Full |
+                                            ADVERTISED_1000baseT_Full |
+                                            ADVERTISED_10000baseT_Full;
+               }
                break;
        case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
                ecmd->supported = SUPPORTED_Autoneg |
                break;
        case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
                ecmd->supported = SUPPORTED_Autoneg |
@@ -350,6 +374,15 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                                  SUPPORTED_100baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
                        ecmd->advertising |= ADVERTISED_100baseT_Full;
                                  SUPPORTED_100baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
                        ecmd->advertising |= ADVERTISED_100baseT_Full;
+               /* firmware detects 10G phy as 100M phy at 100M speed */
+               if (pf->flags & I40E_FLAG_HAVE_10GBASET_PHY) {
+                       ecmd->supported |= SUPPORTED_10000baseT_Full |
+                                          SUPPORTED_1000baseT_Full;
+                       ecmd->advertising |= ADVERTISED_Autoneg |
+                                            ADVERTISED_100baseT_Full |
+                                            ADVERTISED_1000baseT_Full |
+                                            ADVERTISED_10000baseT_Full;
+               }
                break;
        case I40E_PHY_TYPE_10GBASE_CR1_CU:
        case I40E_PHY_TYPE_10GBASE_CR1:
                break;
        case I40E_PHY_TYPE_10GBASE_CR1_CU:
        case I40E_PHY_TYPE_10GBASE_CR1:
@@ -370,6 +403,21 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                                  SUPPORTED_1000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
                                  SUPPORTED_1000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+                       ecmd->supported |= SUPPORTED_100baseT_Full;
+                       if (hw_link_info->requested_speeds &
+                           I40E_LINK_SPEED_100MB)
+                               ecmd->advertising |= ADVERTISED_100baseT_Full;
+               }
+               break;
+       /* Backplane is set based on supported phy types in get_settings
+        * so don't set anything here but don't warn either
+        */
+       case I40E_PHY_TYPE_40GBASE_KR4:
+       case I40E_PHY_TYPE_20GBASE_KR2:
+       case I40E_PHY_TYPE_10GBASE_KR:
+       case I40E_PHY_TYPE_10GBASE_KX4:
+       case I40E_PHY_TYPE_1000BASE_KX:
                break;
        default:
                /* if we got here and link is up something bad is afoot */
                break;
        default:
                /* if we got here and link is up something bad is afoot */
@@ -408,7 +456,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
  * Reports link settings that can be determined when link is down
  **/
 static void i40e_get_settings_link_down(struct i40e_hw *hw,
  * Reports link settings that can be determined when link is down
  **/
 static void i40e_get_settings_link_down(struct i40e_hw *hw,
-                                     struct ethtool_cmd *ecmd)
+                                       struct ethtool_cmd *ecmd, struct i40e_pf *pf)
 {
        enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
 
 {
        enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
 
@@ -422,6 +470,10 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
                                SUPPORTED_1000baseT_Full;
                ecmd->advertising |= ADVERTISED_Autoneg |
                                     ADVERTISED_1000baseT_Full;
                                SUPPORTED_1000baseT_Full;
                ecmd->advertising |= ADVERTISED_Autoneg |
                                     ADVERTISED_1000baseT_Full;
+               if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+                       ecmd->supported |= SUPPORTED_100baseT_Full;
+                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+               }
        }
        if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
            phy_types & I40E_CAP_PHY_TYPE_XFI ||
        }
        if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
            phy_types & I40E_CAP_PHY_TYPE_XFI ||
@@ -493,9 +545,9 @@ static int i40e_get_settings(struct net_device *netdev,
        bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
 
        if (link_up)
        bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
 
        if (link_up)
-               i40e_get_settings_link_up(hw, ecmd, netdev);
+               i40e_get_settings_link_up(hw, ecmd, netdev, pf);
        else
        else
-               i40e_get_settings_link_down(hw, ecmd);
+               i40e_get_settings_link_down(hw, ecmd, pf);
 
        /* Now set the settings that don't rely on link being up/down */
 
 
        /* Now set the settings that don't rely on link being up/down */
 
@@ -707,8 +759,8 @@ static int i40e_set_settings(struct net_device *netdev,
        } else {
                /* If autoneg is currently enabled */
                if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
        } else {
                /* If autoneg is currently enabled */
                if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
-                       /* If autoneg is supported 10GBASE_T is the only phy that
-                        * can disable it, so otherwise return error
+                       /* If autoneg is supported 10GBASE_T is the only phy
+                        * that can disable it, so otherwise return error
                         */
                        if (safe_ecmd.supported & SUPPORTED_Autoneg &&
                            hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
                         */
                        if (safe_ecmd.supported & SUPPORTED_Autoneg &&
                            hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
@@ -758,12 +810,12 @@ static int i40e_set_settings(struct net_device *netdev,
 
                /* save the requested speeds */
                hw->phy.link_info.requested_speeds = config.link_speed;
 
                /* save the requested speeds */
                hw->phy.link_info.requested_speeds = config.link_speed;
-               /* set link and an so changes take effect */
+               /* set link and auto negotiation so changes take effect */
                config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
                /* If link is up put link down */
                if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
                config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
                /* If link is up put link down */
                if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
-                       /* Tell the OS link is going down, the link will go back up when fw
-                        * says it is ready asynchronously
+                       /* Tell the OS link is going down, the link will go
+                        * back up when fw says it is ready asynchronously
                         */
                        i40e_print_link_message(vsi, false);
                        netif_carrier_off(netdev);
                         */
                        i40e_print_link_message(vsi, false);
                        netif_carrier_off(netdev);
@@ -782,8 +834,8 @@ static int i40e_set_settings(struct net_device *netdev,
                status = i40e_update_link_info(hw);
                if (status)
                        netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
                status = i40e_update_link_info(hw);
                if (status)
                        netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
-                                   i40e_stat_str(hw, status),
-                                   i40e_aq_str(hw, hw->aq.asq_last_status));
+                                  i40e_stat_str(hw, status),
+                                  i40e_aq_str(hw, hw->aq.asq_last_status));
 
        } else {
                netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
 
        } else {
                netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -874,8 +926,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
                return -EOPNOTSUPP;
 
        if (pause->autoneg != (hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
                return -EOPNOTSUPP;
 
        if (pause->autoneg != (hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
-               netdev_info(netdev,
-                       "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+               netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
                return -EOPNOTSUPP;
        }
 
                return -EOPNOTSUPP;
        }
 
@@ -883,8 +934,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
        if (!test_bit(__I40E_DOWN, &pf->state) &&
            !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
                /* Send message that it might not necessarily work*/
        if (!test_bit(__I40E_DOWN, &pf->state) &&
            !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
                /* Send message that it might not necessarily work*/
-               netdev_info(netdev,
-                        "Autoneg did not complete so changing settings may not result in an actual change.\n");
+               netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
        }
 
        if (dcbx_cfg->pfc.pfcenable) {
        }
 
        if (dcbx_cfg->pfc.pfcenable) {
@@ -916,20 +966,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 
        if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
                netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
 
        if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
                netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
-                        i40e_stat_str(hw, status),
-                        i40e_aq_str(hw, hw->aq.asq_last_status));
+                           i40e_stat_str(hw, status),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
                netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
                netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
-                        i40e_stat_str(hw, status),
-                        i40e_aq_str(hw, hw->aq.asq_last_status));
+                           i40e_stat_str(hw, status),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
                netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
                netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
-                        i40e_stat_str(hw, status),
-                        i40e_aq_str(hw, hw->aq.asq_last_status));
+                           i40e_stat_str(hw, status),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
 
                err = -EAGAIN;
        }
 
@@ -978,27 +1028,24 @@ static int i40e_set_tx_csum(struct net_device *netdev, u32 data)
 #else
                netdev->features |= NETIF_F_IP_CSUM;
 #endif
 #else
                netdev->features |= NETIF_F_IP_CSUM;
 #endif
-               netdev->features |= NETIF_F_SCTP_CSUM;
+               netdev->features |= NETIF_F_SCTP_CRC;
        } else {
 #ifdef NETIF_F_IPV6_CSUM
                netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
        } else {
 #ifdef NETIF_F_IPV6_CSUM
                netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                                     NETIF_F_SCTP_CSUM);
+                                     NETIF_F_SCTP_CRC);
 #else
 #else
-               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CSUM);
+               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CRC);
 #endif
        }
 
        return 0;
 }
 
 #endif
        }
 
        return 0;
 }
 
-#ifdef NETIF_F_TSO
 static int i40e_set_tso(struct net_device *netdev, u32 data)
 {
        if (data) {
                netdev->features |= NETIF_F_TSO;
 static int i40e_set_tso(struct net_device *netdev, u32 data)
 {
        if (data) {
                netdev->features |= NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
                netdev->features |= NETIF_F_TSO6;
                netdev->features |= NETIF_F_TSO6;
-#endif
        } else {
 #ifndef HAVE_NETDEV_VLAN_FEATURES
                struct i40e_netdev_priv *np = netdev_priv(netdev);
        } else {
 #ifndef HAVE_NETDEV_VLAN_FEATURES
                struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -1006,14 +1053,13 @@ static int i40e_set_tso(struct net_device *netdev, u32 data)
                if (np->vsi->vlgrp) {
                        int i;
                        struct net_device *v_netdev;
                if (np->vsi->vlgrp) {
                        int i;
                        struct net_device *v_netdev;
+
                        for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                                v_netdev =
                                       vlan_group_get_device(np->vsi->vlgrp, i);
                                if (v_netdev) {
                                        v_netdev->features &= ~NETIF_F_TSO;
                        for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
                                v_netdev =
                                       vlan_group_get_device(np->vsi->vlgrp, i);
                                if (v_netdev) {
                                        v_netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
                                        v_netdev->features &= ~NETIF_F_TSO6;
                                        v_netdev->features &= ~NETIF_F_TSO6;
-#endif
                                        vlan_group_set_device(np->vsi->vlgrp, i,
                                                              v_netdev);
                                }
                                        vlan_group_set_device(np->vsi->vlgrp, i,
                                                              v_netdev);
                                }
@@ -1021,14 +1067,11 @@ static int i40e_set_tso(struct net_device *netdev, u32 data)
                }
 #endif /* HAVE_NETDEV_VLAN_FEATURES */
                netdev->features &= ~NETIF_F_TSO;
                }
 #endif /* HAVE_NETDEV_VLAN_FEATURES */
                netdev->features &= ~NETIF_F_TSO;
-#ifdef NETIF_F_TSO6
                netdev->features &= ~NETIF_F_TSO6;
                netdev->features &= ~NETIF_F_TSO6;
-#endif
        }
 
        return 0;
 }
        }
 
        return 0;
 }
-#endif /* NETIF_F_TSO */
 #ifdef ETHTOOL_GFLAGS
 static int i40e_set_flags(struct net_device *netdev, u32 data)
 {
 #ifdef ETHTOOL_GFLAGS
 static int i40e_set_flags(struct net_device *netdev, u32 data)
 {
@@ -1100,7 +1143,7 @@ static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_hw *hw = &pf->hw;
        u32 *reg_buf = p;
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_hw *hw = &pf->hw;
        u32 *reg_buf = p;
-       int i, j, ri;
+       unsigned int i, j, ri;
        u32 reg;
 
        /* Tell ethtool which driver-version-specific regs output we have.
        u32 reg;
 
        /* Tell ethtool which driver-version-specific regs output we have.
@@ -1143,16 +1186,19 @@ static int i40e_get_eeprom(struct net_device *netdev,
        /* check for NVMUpdate access method */
        magic = hw->vendor_id | (hw->device_id << 16);
        if (eeprom->magic && eeprom->magic != magic) {
        /* check for NVMUpdate access method */
        magic = hw->vendor_id | (hw->device_id << 16);
        if (eeprom->magic && eeprom->magic != magic) {
-               struct i40e_nvm_access *cmd;
-               int errno;
+               struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
+               int errno = 0;
 
                /* make sure it is the right magic for NVMUpdate */
                if ((eeprom->magic >> 16) != hw->device_id)
 
                /* make sure it is the right magic for NVMUpdate */
                if ((eeprom->magic >> 16) != hw->device_id)
-                       return -EINVAL;
+                       errno = -EINVAL;
+               else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+                        test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+                       errno = -EBUSY;
+               else
+                       ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
 
 
-               cmd = (struct i40e_nvm_access *)eeprom;
-               ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-               if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
+               if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
                        dev_info(&pf->pdev->dev,
                                 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
                                 ret_val, hw->aq.asq_last_status, errno,
                        dev_info(&pf->pdev->dev,
                                 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
                                 ret_val, hw->aq.asq_last_status, errno,
@@ -1236,27 +1282,25 @@ static int i40e_set_eeprom(struct net_device *netdev,
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_hw *hw = &np->vsi->back->hw;
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_hw *hw = &np->vsi->back->hw;
        struct i40e_pf *pf = np->vsi->back;
-       struct i40e_nvm_access *cmd;
+       struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
        int ret_val = 0;
        int ret_val = 0;
-       int errno;
+       int errno = 0;
        u32 magic;
 
        /* normal ethtool set_eeprom is not supported */
        magic = hw->vendor_id | (hw->device_id << 16);
        if (eeprom->magic == magic)
        u32 magic;
 
        /* normal ethtool set_eeprom is not supported */
        magic = hw->vendor_id | (hw->device_id << 16);
        if (eeprom->magic == magic)
-               return -EOPNOTSUPP;
-
+               errno = -EOPNOTSUPP;
        /* check for NVMUpdate access method */
        /* check for NVMUpdate access method */
-       if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
-               return -EINVAL;
-
-       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
-           test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
-               return -EBUSY;
+       else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
+               errno = -EINVAL;
+       else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+                test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+               errno = -EBUSY;
+       else
+               ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
 
 
-       cmd = (struct i40e_nvm_access *)eeprom;
-       ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-       if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
+       if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
                dev_info(&pf->pdev->dev,
                         "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
                         ret_val, hw->aq.asq_last_status, errno,
                dev_info(&pf->pdev->dev,
                         "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
                         ret_val, hw->aq.asq_last_status, errno,
@@ -1281,7 +1325,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
                sizeof(drvinfo->bus_info));
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
        strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
                sizeof(drvinfo->bus_info));
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
-       drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
+       if (pf->hw.pf_id == 0)
+               drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN;
+       else
+               drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
 #endif
 }
 
 #endif
 }
 
@@ -1354,7 +1401,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
        /* alloc updated Tx resources */
        if (new_tx_count != vsi->tx_rings[0]->count) {
                netdev_info(netdev,
        /* alloc updated Tx resources */
        if (new_tx_count != vsi->tx_rings[0]->count) {
                netdev_info(netdev,
-                           "Changing Tx descriptor count from %d to %d.\n",
+                           "Changing Tx descriptor count from %d to %d\n",
                            vsi->tx_rings[0]->count, new_tx_count);
                tx_rings = kcalloc(vsi->alloc_queue_pairs,
                                   sizeof(struct i40e_ring), GFP_KERNEL);
                            vsi->tx_rings[0]->count, new_tx_count);
                tx_rings = kcalloc(vsi->alloc_queue_pairs,
                                   sizeof(struct i40e_ring), GFP_KERNEL);
@@ -1399,6 +1446,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
                }
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                }
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       /* this is to allow wr32 to have something to write to
+                        * during early allocation of rx buffers
+                        */
+                       u32 __iomem faketail = 0;
+                       struct i40e_ring *ring;
+                       u16 unused;
+
                        /* clone ring and setup updated count */
                        rx_rings[i] = *vsi->rx_rings[i];
                        rx_rings[i].count = new_rx_count;
                        /* clone ring and setup updated count */
                        rx_rings[i] = *vsi->rx_rings[i];
                        rx_rings[i].count = new_rx_count;
@@ -1407,12 +1461,22 @@ static int i40e_set_ringparam(struct net_device *netdev,
                         */
                        rx_rings[i].desc = NULL;
                        rx_rings[i].rx_bi = NULL;
                         */
                        rx_rings[i].desc = NULL;
                        rx_rings[i].rx_bi = NULL;
+                       rx_rings[i].tail = (u8 __iomem *)&faketail;
                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
+                       if (err)
+                               goto rx_unwind;
+
+                       /* now allocate the rx buffers to make sure the OS
+                        * has enough memory, any failure here means abort
+                        */
+                       ring = &rx_rings[i];
+                       unused = I40E_DESC_UNUSED(ring);
+                       err = i40e_alloc_rx_buffers(ring, unused);
+rx_unwind:
                        if (err) {
                        if (err) {
-                               while (i) {
-                                       i--;
+                               do {
                                        i40e_free_rx_resources(&rx_rings[i]);
                                        i40e_free_rx_resources(&rx_rings[i]);
-                               }
+                               } while (i--);
                                kfree(rx_rings);
                                rx_rings = NULL;
 
                                kfree(rx_rings);
                                rx_rings = NULL;
 
@@ -1438,6 +1502,17 @@ static int i40e_set_ringparam(struct net_device *netdev,
        if (rx_rings) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        i40e_free_rx_resources(vsi->rx_rings[i]);
        if (rx_rings) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        i40e_free_rx_resources(vsi->rx_rings[i]);
+                       /* get the real tail offset */
+                       rx_rings[i].tail = vsi->rx_rings[i]->tail;
+                       /* this is to fake out the allocation routine
+                        * into thinking it has to realloc everything
+                        * but the recycling logic will let us re-use
+                        * the buffers allocated above
+                        */
+                       rx_rings[i].next_to_use = 0;
+                       rx_rings[i].next_to_clean = 0;
+                       rx_rings[i].next_to_alloc = 0;
+                       /* do a struct copy */
                        *vsi->rx_rings[i] = rx_rings[i];
                }
                kfree(rx_rings);
                        *vsi->rx_rings[i] = rx_rings[i];
                }
                kfree(rx_rings);
@@ -1500,7 +1575,10 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
                        return I40E_VSI_STATS_LEN(netdev);
                }
        case ETH_SS_PRIV_FLAGS:
                        return I40E_VSI_STATS_LEN(netdev);
                }
        case ETH_SS_PRIV_FLAGS:
-               return I40E_PRIV_FLAGS_STR_LEN;
+               if (pf->hw.pf_id == 0)
+                       return I40E_PRIV_FLAGS_GL_STR_LEN;
+               else
+                       return I40E_PRIV_FLAGS_STR_LEN;
        default:
                return -EOPNOTSUPP;
        }
        default:
                return -EOPNOTSUPP;
        }
@@ -1516,7 +1594,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
        struct i40e_pf *pf = vsi->back;
        int i = 0;
        char *p;
        struct i40e_pf *pf = vsi->back;
        int i = 0;
        char *p;
-       int j;
+       unsigned int j;
 
 #ifdef HAVE_NDO_GET_STATS64
        struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
 
 #ifdef HAVE_NDO_GET_STATS64
        struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
@@ -1535,7 +1613,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
        for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
                p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
                data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
        for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
                p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
                data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
-                       sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+                           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
 #ifdef I40E_FCOE
        for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
        }
 #ifdef I40E_FCOE
        for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
@@ -1577,39 +1655,41 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                i += 2;
        }
        rcu_read_unlock();
                i += 2;
        }
        rcu_read_unlock();
-       if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
-               if ((pf->lan_veb != I40E_NO_VEB) &&
-                   (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
-                       struct i40e_veb *veb = pf->veb[pf->lan_veb];
+       if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+               return;
 
 
-                       for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
-                               p = (char *)veb + i40e_gstrings_veb_stats[j].stat_offset;
-                               data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
-                                          sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-                       }
-                       for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
-                               data[i++] = veb->tc_stats.tc_tx_packets[j];
-                               data[i++] = veb->tc_stats.tc_tx_bytes[j];
-                               data[i++] = veb->tc_stats.tc_rx_packets[j];
-                               data[i++] = veb->tc_stats.tc_rx_bytes[j];
-                       }
-               }
-               for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
-                       p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
-                       data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
-                                  sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-               }
-               for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
-                       data[i++] = pf->stats.priority_xon_tx[j];
-                       data[i++] = pf->stats.priority_xoff_tx[j];
+       if ((pf->lan_veb != I40E_NO_VEB) &&
+           (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
+               struct i40e_veb *veb = pf->veb[pf->lan_veb];
+
+               for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
+                       p = (char *)veb;
+                       p += i40e_gstrings_veb_stats[j].stat_offset;
+                       data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
+                                    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
                }
                }
-               for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
-                       data[i++] = pf->stats.priority_xon_rx[j];
-                       data[i++] = pf->stats.priority_xoff_rx[j];
+               for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
+                       data[i++] = veb->tc_stats.tc_tx_packets[j];
+                       data[i++] = veb->tc_stats.tc_tx_bytes[j];
+                       data[i++] = veb->tc_stats.tc_rx_packets[j];
+                       data[i++] = veb->tc_stats.tc_rx_bytes[j];
                }
                }
-               for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
-                       data[i++] = pf->stats.priority_xon_2_xoff[j];
        }
        }
+       for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+               p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+               data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+                            sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+       }
+       for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+               data[i++] = pf->stats.priority_xon_tx[j];
+               data[i++] = pf->stats.priority_xoff_tx[j];
+       }
+       for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+               data[i++] = pf->stats.priority_xon_rx[j];
+               data[i++] = pf->stats.priority_xoff_rx[j];
+       }
+       for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+               data[i++] = pf->stats.priority_xon_2_xoff[j];
 }
 
 static void i40e_get_strings(struct net_device *netdev, u32 stringset,
 }
 
 static void i40e_get_strings(struct net_device *netdev, u32 stringset,
@@ -1619,7 +1699,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        char *p = (char *)data;
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        char *p = (char *)data;
-       int i;
+       unsigned int i;
 
        switch (stringset) {
        case ETH_SS_TEST:
 
        switch (stringset) {
        case ETH_SS_TEST:
@@ -1656,64 +1736,73 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
-               if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
-                       if ((pf->lan_veb != I40E_NO_VEB) &&
-                           (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
-                               for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
-                                       snprintf(p, ETH_GSTRING_LEN, "veb.%s",
-                                                i40e_gstrings_veb_stats[i].stat_string);
-                                       p += ETH_GSTRING_LEN;
-                               }
-                               for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                                       snprintf(p, ETH_GSTRING_LEN,
-                                                "veb.tc_%u_tx_packets", i);
-                                       p += ETH_GSTRING_LEN;
-                                       snprintf(p, ETH_GSTRING_LEN,
-                                                "veb.tc_%u_tx_bytes", i);
-                                       p += ETH_GSTRING_LEN;
-                                       snprintf(p, ETH_GSTRING_LEN,
-                                                "veb.tc_%u_rx_packets", i);
-                                       p += ETH_GSTRING_LEN;
-                                       snprintf(p, ETH_GSTRING_LEN,
-                                                "veb.tc_%u_rx_bytes", i);
-                                       p += ETH_GSTRING_LEN;
-                               }
-                       }
-                       for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
-                               snprintf(p, ETH_GSTRING_LEN, "port.%s",
-                                        i40e_gstrings_stats[i].stat_string);
+               if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+                       return;
+
+               if ((pf->lan_veb != I40E_NO_VEB) &&
+                   (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
+                       for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
+                               snprintf(p, ETH_GSTRING_LEN, "veb.%s",
+                                       i40e_gstrings_veb_stats[i].stat_string);
                                p += ETH_GSTRING_LEN;
                        }
                                p += ETH_GSTRING_LEN;
                        }
-                       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-                               snprintf(p, ETH_GSTRING_LEN,
-                                        "port.tx_priority_%u_xon", i);
-                               p += ETH_GSTRING_LEN;
+                       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                                snprintf(p, ETH_GSTRING_LEN,
                                snprintf(p, ETH_GSTRING_LEN,
-                                        "port.tx_priority_%u_xoff", i);
+                                        "veb.tc_%u_tx_packets", i);
                                p += ETH_GSTRING_LEN;
                                p += ETH_GSTRING_LEN;
-                       }
-                       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
                                snprintf(p, ETH_GSTRING_LEN,
                                snprintf(p, ETH_GSTRING_LEN,
-                                        "port.rx_priority_%u_xon", i);
+                                        "veb.tc_%u_tx_bytes", i);
                                p += ETH_GSTRING_LEN;
                                snprintf(p, ETH_GSTRING_LEN,
                                p += ETH_GSTRING_LEN;
                                snprintf(p, ETH_GSTRING_LEN,
-                                        "port.rx_priority_%u_xoff", i);
+                                        "veb.tc_%u_rx_packets", i);
                                p += ETH_GSTRING_LEN;
                                p += ETH_GSTRING_LEN;
-                       }
-                       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
                                snprintf(p, ETH_GSTRING_LEN,
                                snprintf(p, ETH_GSTRING_LEN,
-                                        "port.rx_priority_%u_xon_2_xoff", i);
+                                        "veb.tc_%u_rx_bytes", i);
                                p += ETH_GSTRING_LEN;
                        }
                }
                                p += ETH_GSTRING_LEN;
                        }
                }
+               for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+                       snprintf(p, ETH_GSTRING_LEN, "port.%s",
+                                i40e_gstrings_stats[i].stat_string);
+                       p += ETH_GSTRING_LEN;
+               }
+               for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+                       snprintf(p, ETH_GSTRING_LEN,
+                                "port.tx_priority_%u_xon", i);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN,
+                                "port.tx_priority_%u_xoff", i);
+                       p += ETH_GSTRING_LEN;
+               }
+               for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+                       snprintf(p, ETH_GSTRING_LEN,
+                                "port.rx_priority_%u_xon", i);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN,
+                                "port.rx_priority_%u_xoff", i);
+                       p += ETH_GSTRING_LEN;
+               }
+               for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+                       snprintf(p, ETH_GSTRING_LEN,
+                                "port.rx_priority_%u_xon_2_xoff", i);
+                       p += ETH_GSTRING_LEN;
+               }
                /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
                break;
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
        case ETH_SS_PRIV_FLAGS:
                /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
                break;
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
        case ETH_SS_PRIV_FLAGS:
-               for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
-                       memcpy(data, i40e_priv_flags_strings[i],
-                              ETH_GSTRING_LEN);
-                       data += ETH_GSTRING_LEN;
+               if (pf->hw.pf_id == 0) {
+                       for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) {
+                               memcpy(data, i40e_priv_flags_strings_gl[i],
+                                      ETH_GSTRING_LEN);
+                               data += ETH_GSTRING_LEN;
+                       }
+               } else {
+                       for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+                               memcpy(data, i40e_priv_flags_strings[i],
+                                      ETH_GSTRING_LEN);
+                               data += ETH_GSTRING_LEN;
+                       }
                }
                break;
 #endif
                }
                break;
 #endif
@@ -1907,7 +1996,7 @@ static void i40e_diag_test(struct net_device *netdev,
                /* If the device is online then take it offline */
                if (if_running)
                        /* indicate we're in test mode */
                /* If the device is online then take it offline */
                if (if_running)
                        /* indicate we're in test mode */
-                       dev_close(netdev);
+                       i40e_close(netdev);
                else
                        /* This reset does not affect link - if it is
                         * changed to a type of reset that does affect
                else
                        /* This reset does not affect link - if it is
                         * changed to a type of reset that does affect
@@ -1936,7 +2025,7 @@ static void i40e_diag_test(struct net_device *netdev,
                i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
                if (if_running)
                i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
                if (if_running)
-                       dev_open(netdev);
+                       i40e_open(netdev);
        } else {
                /* Online tests */
                netif_info(pf, drv, netdev, "online testing starting\n");
        } else {
                /* Online tests */
                netif_info(pf, drv, netdev, "online testing starting\n");
@@ -2020,28 +2109,52 @@ static int i40e_set_phys_id(struct net_device *netdev,
                            enum ethtool_phys_id_state state)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
                            enum ethtool_phys_id_state state)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
+       i40e_status ret = I40E_SUCCESS;
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_hw *hw = &pf->hw;
        int blink_freq = 2;
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_hw *hw = &pf->hw;
        int blink_freq = 2;
+       u16 temp_status;
 
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
 
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
-               pf->led_status = i40e_led_get(hw);
+               if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
+                       pf->led_status = i40e_led_get(hw);
+               } else {
+                       i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL);
+                       ret = i40e_led_get_phy(hw, &temp_status,
+                                              &pf->phy_led_val);
+                       pf->led_status = temp_status;
+               }
                return blink_freq;
        case ETHTOOL_ID_ON:
                return blink_freq;
        case ETHTOOL_ID_ON:
-               i40e_led_set(hw, 0xF, false);
+               if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+                       i40e_led_set(hw, 0xf, false);
+               else
+                       ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
                break;
        case ETHTOOL_ID_OFF:
                break;
        case ETHTOOL_ID_OFF:
-               i40e_led_set(hw, 0x0, false);
+               if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+                       i40e_led_set(hw, 0x0, false);
+               else
+                       ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
                break;
        case ETHTOOL_ID_INACTIVE:
                break;
        case ETHTOOL_ID_INACTIVE:
-               i40e_led_set(hw, pf->led_status, false);
+               if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
+                       i40e_led_set(hw, false, pf->led_status);
+               } else {
+                       ret = i40e_led_set_phy(hw, false, pf->led_status,
+                                              (pf->phy_led_val |
+                                              I40E_PHY_LED_MODE_ORIG));
+                       i40e_aq_set_phy_debug(hw, 0, NULL);
+               }
                break;
        default:
                break;
        }
                break;
        default:
                break;
        }
-
-       return 0;
+               if (ret)
+                       return -ENOENT;
+               else
+                       return 0;
 }
 #else /* HAVE_ETHTOOL_SET_PHYS_ID */
 static int i40e_phys_id(struct net_device *netdev, u32 data)
 }
 #else /* HAVE_ETHTOOL_SET_PHYS_ID */
 static int i40e_phys_id(struct net_device *netdev, u32 data)
@@ -2049,23 +2162,45 @@ static int i40e_phys_id(struct net_device *netdev, u32 data)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_hw *hw = &pf->hw;
+       i40e_status ret = I40E_SUCCESS;
+       u16 temp_status;
        int i;
 
        int i;
 
-       pf->led_status = i40e_led_get(hw);
+       if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
+               pf->led_status = i40e_led_get(hw);
+       } else {
+               ret = i40e_led_get_phy(hw, &temp_status,
+                                      &pf->phy_led_val);
+               pf->led_status = temp_status;
+       }
 
        if (!data || data > 300)
                data = 300;
 
 
        if (!data || data > 300)
                data = 300;
 
+       /* 10GBaseT PHY controls led's through PHY, not MAC */
        for (i = 0; i < (data * 1000); i += 400) {
        for (i = 0; i < (data * 1000); i += 400) {
-               i40e_led_set(hw, 0xF, false);
+               if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+                       i40e_led_set(hw, 0xF, false);
+               else
+                       ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
                msleep_interruptible(200);
                msleep_interruptible(200);
-               i40e_led_set(hw, 0x0, false);
+               if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+                       i40e_led_set(hw, 0x0, false);
+               else
+                       ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
                msleep_interruptible(200);
        }
                msleep_interruptible(200);
        }
+       if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+               i40e_led_set(hw, pf->led_status, false);
+       else
+               ret = i40e_led_set_phy(hw, false, pf->led_status,
+                                      (pf->led_status |
+                                      I40E_PHY_LED_MODE_ORIG));
 
 
-       i40e_led_set(hw, pf->led_status, false);
-
-       return 0;
+       if (ret)
+               return -ENOENT;
+       else
+               return 0;
 }
 #endif /* HAVE_ETHTOOL_SET_PHYS_ID */
 
 }
 #endif /* HAVE_ETHTOOL_SET_PHYS_ID */
 
@@ -2202,46 +2337,77 @@ static int i40e_set_rx_ntuple(struct net_device *dev,
  **/
 static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
 {
  **/
 static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
 {
+       struct i40e_hw *hw = &pf->hw;
+       u8 flow_pctype = 0;
+       u64 i_set = 0;
+
        cmd->data = 0;
 
        cmd->data = 0;
 
-       if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) {
-               cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data;
-               cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type;
-               return 0;
-       }
-       /* Report default options for RSS on i40e */
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
+               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+               break;
        case UDP_V4_FLOW:
        case UDP_V4_FLOW:
-               cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-       /* fall through to add IP fields */
+               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+               break;
+       case TCP_V6_FLOW:
+               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+               break;
+       case UDP_V6_FLOW:
+               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+               break;
        case SCTP_V4_FLOW:
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
        case ESP_V4_FLOW:
        case IPV4_FLOW:
        case SCTP_V4_FLOW:
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
        case ESP_V4_FLOW:
        case IPV4_FLOW:
-               cmd->data |= RXH_IP_SRC | RXH_IP_DST;
-               break;
-       case TCP_V6_FLOW:
-       case UDP_V6_FLOW:
-               cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-       /* fall through to add IP fields */
        case SCTP_V6_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
        case ESP_V6_FLOW:
        case IPV6_FLOW:
        case SCTP_V6_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
        case ESP_V6_FLOW:
        case IPV6_FLOW:
+               /* Default is src/dest for IP, no matter the L4 hashing */
                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
                break;
        default:
                return -EINVAL;
        }
 
                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
                break;
        default:
                return -EINVAL;
        }
 
+       /* Read flow based hash input set register */
+       if (flow_pctype) {
+               i_set = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
+                                             flow_pctype)) |
+                       ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
+                                              flow_pctype)) << 32);
+       }
+
+       /* Process bits of hash input set */
+       if (i_set) {
+               if (i_set & BIT_ULL(I40E_L4_SRC_SHIFT))
+                       cmd->data |= RXH_L4_B_0_1;
+               if (i_set & BIT_ULL(I40E_L4_DST_SHIFT))
+                       cmd->data |= RXH_L4_B_2_3;
+
+               if (cmd->flow_type == TCP_V4_FLOW ||
+                   cmd->flow_type == UDP_V4_FLOW) {
+                       if (i_set & BIT_ULL(I40E_L3_SRC_SHIFT))
+                               cmd->data |= RXH_IP_SRC;
+                       if (i_set & BIT_ULL(I40E_L3_DST_SHIFT))
+                               cmd->data |= RXH_IP_DST;
+               } else if (cmd->flow_type == TCP_V6_FLOW ||
+                         cmd->flow_type == UDP_V6_FLOW) {
+                       if (i_set & BIT_ULL(I40E_L3_V6_SRC_SHIFT))
+                               cmd->data |= RXH_IP_SRC;
+                       if (i_set & BIT_ULL(I40E_L3_V6_DST_SHIFT))
+                               cmd->data |= RXH_IP_DST;
+               }
+       }
+
        return 0;
 }
 
 /**
        return 0;
 }
 
 /**
- * i40e_get_ethtool_fdir_all - Populates the rule count of a command
+ * i40e_get_rx_filter_ids - Populates the rule count of a command
  * @pf: Pointer to the physical function struct
  * @cmd: The command to get or set Rx flow classification rules
  * @rule_locs: Array of used rule locations
  * @pf: Pointer to the physical function struct
  * @cmd: The command to get or set Rx flow classification rules
  * @rule_locs: Array of used rule locations
@@ -2251,23 +2417,34 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
  *
  * Returns 0 on success or -EMSGSIZE if entry not found
  **/
  *
  * Returns 0 on success or -EMSGSIZE if entry not found
  **/
-static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
-                                    struct ethtool_rxnfc *cmd,
-                                    u32 *rule_locs)
+static int i40e_get_rx_filter_ids(struct i40e_pf *pf,
+                                 struct ethtool_rxnfc *cmd,
+                                 u32 *rule_locs)
 {
 {
-       struct i40e_fdir_filter *rule;
+       struct i40e_fdir_filter *f_rule;
+       struct i40e_cloud_filter *c_rule;
        struct hlist_node *node2;
        struct hlist_node *node2;
-       int cnt = 0;
+       unsigned int cnt = 0;
 
        /* report total rule count */
        cmd->data = i40e_get_fd_cnt_all(pf);
 
 
        /* report total rule count */
        cmd->data = i40e_get_fd_cnt_all(pf);
 
-       hlist_for_each_entry_safe(rule, node2,
+       hlist_for_each_entry_safe(f_rule, node2,
                                  &pf->fdir_filter_list, fdir_node) {
                if (cnt == cmd->rule_cnt)
                        return -EMSGSIZE;
 
                                  &pf->fdir_filter_list, fdir_node) {
                if (cnt == cmd->rule_cnt)
                        return -EMSGSIZE;
 
-               rule_locs[cnt] = rule->fd_id;
+               rule_locs[cnt] = f_rule->fd_id;
+               cnt++;
+       }
+
+       /* find the cloud filter rule ids */
+       hlist_for_each_entry_safe(c_rule, node2,
+                                 &pf->cloud_filter_list, cloud_node) {
+               if (cnt == cmd->rule_cnt)
+                       return -EMSGSIZE;
+
+               rule_locs[cnt] = c_rule->id;
                cnt++;
        }
 
                cnt++;
        }
 
@@ -2328,85 +2505,104 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
 
                vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
                if (vsi && vsi->type == I40E_VSI_SRIOV) {
 
                vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
                if (vsi && vsi->type == I40E_VSI_SRIOV) {
-                       fsp->h_ext.data[1] = htonl(vsi->vf_id);
-                       fsp->m_ext.data[1] = htonl(0x1);
+                       fsp->h_ext.data[1] = cpu_to_be32(vsi->vf_id);
+                       fsp->m_ext.data[1] = cpu_to_be32(0x1);
                }
        }
 
                }
        }
 
+       /* Present the value of user-def as part of get filters */
+       if (i40e_is_flex_filter(rule)) {
+               fsp->h_ext.data[1] = (__be32)((rule->flex_bytes[3] << 16) |
+                                     rule->flex_bytes[2]);
+               fsp->m_ext.data[1] = (__be32)((rule->flex_mask[3] << 16) |
+                                     rule->flex_mask[2]);
+               fsp->flow_type |= FLOW_EXT;
+       }
+
        return 0;
 }
 
        return 0;
 }
 
-#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
 #define VXLAN_PORT     8472
 
 /**
 #define VXLAN_PORT     8472
 
 /**
- * i40e_get_vxlan_filter_ethtool - get a vxlan filter by loc
+ * i40e_get_cloud_filter_entry - get a cloud filter by loc
  * @pf: pointer to the physical function struct
  * @cmd: The command to get or set Rx flow classification rules
  *
  * @pf: pointer to the physical function struct
  * @cmd: The command to get or set Rx flow classification rules
  *
- * get vxlan filter by loc.
+ * get cloud filter by loc.
  * Returns 0 if success.
  **/
  * Returns 0 if success.
  **/
-static int i40e_get_vxlan_filter_ethtool(struct i40e_pf *pf,
-                                        struct ethtool_rxnfc *cmd)
+static int i40e_get_cloud_filter_entry(struct i40e_pf *pf,
+                                      struct ethtool_rxnfc *cmd)
 {
        struct ethtool_rx_flow_spec *fsp =
                        (struct ethtool_rx_flow_spec *)&cmd->fs;
        static const u8 mac_broadcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
 {
        struct ethtool_rx_flow_spec *fsp =
                        (struct ethtool_rx_flow_spec *)&cmd->fs;
        static const u8 mac_broadcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
-       struct i40e_cloud_filter *rule, *pfilter = NULL;
-       struct i40e_vsi *dst_vsi;
+       struct i40e_cloud_filter *rule, *filter = NULL;
        struct hlist_node *node2;
        __be32 tena;
        struct hlist_node *node2;
        __be32 tena;
+       int i;
 
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->cloud_filter_list, cloud_node) {
 
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->cloud_filter_list, cloud_node) {
-               /* filter found with the id */
-               if (rule->id == fsp->location) {
-                       pfilter = rule;
+               /* filter found */
+               if (rule->id == fsp->location)
+                       filter = rule;
+
+               /* bail out if we've passed the likely location in the list */
+               if (rule->id >= fsp->location)
                        break;
                        break;
-               }
+
        }
        }
-       if (!pfilter) {
+       if (!filter) {
                dev_info(&pf->pdev->dev, "No cloud filter with loc %d\n",
                        fsp->location);
                return -ENOENT;
        }
 
                dev_info(&pf->pdev->dev, "No cloud filter with loc %d\n",
                        fsp->location);
                return -ENOENT;
        }
 
-       dst_vsi = i40e_find_vsi_from_id(pf, pfilter->vsi_id);
-       if (dst_vsi && dst_vsi->type == I40E_VSI_SRIOV) {
-               fsp->h_ext.data[1] = htonl(dst_vsi->vf_id);
-               fsp->m_ext.data[1] = htonl(0x1);
+       /* check for VF as a cloud filter target */
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               if (!pf->vsi[i] || pf->vsi[i]->seid != filter->seid)
+                       continue;
+
+               if (pf->vsi[i]->type == I40E_VSI_SRIOV)
+                       fsp->h_ext.data[1] = cpu_to_be32(pf->vsi[i]->vf_id);
+               else if (pf->vsi[i]->type == I40E_VSI_MAIN)
+                       fsp->h_ext.data[1] = cpu_to_be32(0xffff);
+               break;
        }
 
        }
 
-       ether_addr_copy(fsp->h_u.ether_spec.h_dest, pfilter->outer_mac);
-       ether_addr_copy(fsp->h_u.ether_spec.h_source, pfilter->inner_mac);
-       fsp->h_u.usr_ip4_spec.ip4dst = pfilter->inner_ip[0];
-       fsp->h_ext.vlan_tci = pfilter->inner_vlan;
+       ether_addr_copy(fsp->h_u.ether_spec.h_dest, filter->outer_mac);
+       ether_addr_copy(fsp->h_u.ether_spec.h_source, filter->inner_mac);
 
 
-       tena = htonl(pfilter->tenant_id);
+       tena = cpu_to_be32(filter->tenant_id);
        memcpy(&fsp->h_ext.data[0], &tena, sizeof(tena));
 
        memcpy(&fsp->h_ext.data[0], &tena, sizeof(tena));
 
-       fsp->ring_cookie = pfilter->queue_id;
-       if (pfilter->flags & I40E_CLOUD_FIELD_OMAC)
+       fsp->ring_cookie = filter->queue_id;
+       if (filter->flags & I40E_CLOUD_FIELD_OMAC)
                ether_addr_copy(fsp->m_u.ether_spec.h_dest, mac_broadcast);
                ether_addr_copy(fsp->m_u.ether_spec.h_dest, mac_broadcast);
-       if (pfilter->flags & I40E_CLOUD_FIELD_IMAC)
+       if (filter->flags & I40E_CLOUD_FIELD_IMAC)
                ether_addr_copy(fsp->m_u.ether_spec.h_source, mac_broadcast);
                ether_addr_copy(fsp->m_u.ether_spec.h_source, mac_broadcast);
-       if (pfilter->flags & I40E_CLOUD_FIELD_IVLAN)
-               fsp->m_ext.vlan_tci = htons(0x7fff);
-       if (pfilter->flags & I40E_CLOUD_FIELD_TEN_ID)
-               *(__be32 *)&fsp->m_ext.data[0] = htonl(0x1);
-       if (pfilter->flags & I40E_CLOUD_FIELD_IIP) {
+       if (filter->flags & I40E_CLOUD_FIELD_IVLAN)
+               fsp->h_ext.vlan_tci = filter->inner_vlan;
+       if (filter->flags & I40E_CLOUD_FIELD_TEN_ID)
+               *(__be32 *)&fsp->h_ext.data[0] = cpu_to_be32(filter->tenant_id);
+       else
+               *(__be32 *)&fsp->h_ext.data[0] = cpu_to_be32(~0);
+
+       if (filter->flags & I40E_CLOUD_FIELD_IIP) {
                fsp->flow_type = IP_USER_FLOW;
                fsp->flow_type = IP_USER_FLOW;
+               fsp->h_u.usr_ip4_spec.ip4dst = filter->inner_ip[0];
                fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
                fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
-       } else
+       } else {
                fsp->flow_type = ETHER_FLOW;
                fsp->flow_type = ETHER_FLOW;
+       }
 
 
-       fsp->flow_type |= FLOW_MAC_EXT;
+       fsp->flow_type |= FLOW_EXT;
 
        return 0;
 }
 
 
        return 0;
 }
 
-#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
 /**
  * i40e_get_rxnfc - command to get RX flow classification rules
  * @netdev: network interface device structure
 /**
  * i40e_get_rxnfc - command to get RX flow classification rules
  * @netdev: network interface device structure
@@ -2428,7 +2624,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 
        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
 
        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
-               cmd->data = vsi->alloc_queue_pairs;
+               cmd->data = vsi->num_queue_pairs;
                ret = 0;
                break;
        case ETHTOOL_GRXFH:
                ret = 0;
                break;
        case ETHTOOL_GRXFH:
@@ -2436,21 +2632,22 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
                break;
        case ETHTOOL_GRXCLSRLCNT:
                cmd->rule_cnt = pf->fdir_pf_active_filters;
                break;
        case ETHTOOL_GRXCLSRLCNT:
                cmd->rule_cnt = pf->fdir_pf_active_filters;
+               cmd->rule_cnt += pf->num_cloud_filters;
                /* report total rule count */
                cmd->data = i40e_get_fd_cnt_all(pf);
                ret = 0;
                break;
        case ETHTOOL_GRXCLSRULE:
                /* report total rule count */
                cmd->data = i40e_get_fd_cnt_all(pf);
                ret = 0;
                break;
        case ETHTOOL_GRXCLSRULE:
-#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
-               ret = i40e_get_vxlan_filter_ethtool(pf, cmd);
-#endif
                ret = i40e_get_ethtool_fdir_entry(pf, cmd);
                ret = i40e_get_ethtool_fdir_entry(pf, cmd);
+               /* if no such fdir filter then try the cloud list */
+               if (ret)
+                       ret = i40e_get_cloud_filter_entry(pf, cmd);
                break;
        case ETHTOOL_GRXCLSRLALL:
 #ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
                break;
        case ETHTOOL_GRXCLSRLALL:
 #ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
-               ret = i40e_get_ethtool_fdir_all(pf, cmd, (u32 *)rule_locs);
+               ret = i40e_get_rx_filter_ids(pf, cmd, (u32 *)rule_locs);
 #else
 #else
-               ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
+               ret = i40e_get_rx_filter_ids(pf, cmd, rule_locs);
 #endif
                break;
        default:
 #endif
                break;
        default:
@@ -2460,6 +2657,51 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
        return ret;
 }
 
        return ret;
 }
 
+/**
+ * i40e_get_rss_hash_bits - Read RSS Hash bits from register
+ * @nfc: pointer to user request
+ * @i_setc bits currently set
+ *
+ * Returns value of bits to be set per user request
+ **/
+static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
+{
+       u64 i_set = i_setc;
+       u64 src_l3 = 0, dst_l3 = 0;
+
+       if (nfc->data & RXH_L4_B_0_1)
+               i_set |= BIT_ULL(I40E_L4_SRC_SHIFT);
+       else
+               i_set &= ~BIT_ULL(I40E_L4_SRC_SHIFT);
+       if (nfc->data & RXH_L4_B_2_3)
+               i_set |= BIT_ULL(I40E_L4_DST_SHIFT);
+       else
+               i_set &= ~BIT_ULL(I40E_L4_DST_SHIFT);
+
+       if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) {
+               src_l3 = I40E_L3_V6_SRC_SHIFT;
+               dst_l3 = I40E_L3_V6_DST_SHIFT;
+       } else if (nfc->flow_type == TCP_V4_FLOW ||
+                 nfc->flow_type == UDP_V4_FLOW) {
+               src_l3 = I40E_L3_SRC_SHIFT;
+               dst_l3 = I40E_L3_DST_SHIFT;
+       } else {
+               /* Any other flow type are not supported here */
+               return i_set;
+       }
+
+       if (nfc->data & RXH_IP_SRC)
+               i_set |= BIT_ULL(src_l3);
+       else
+               i_set &= ~BIT_ULL(src_l3);
+       if (nfc->data & RXH_IP_DST)
+               i_set |= BIT_ULL(dst_l3);
+       else
+               i_set &= ~BIT_ULL(dst_l3);
+
+       return i_set;
+}
+
 /**
  * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
  * @pf: pointer to the physical function struct
 /**
  * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
  * @pf: pointer to the physical function struct
@@ -2470,8 +2712,16 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 {
        struct i40e_hw *hw = &pf->hw;
 static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 {
        struct i40e_hw *hw = &pf->hw;
-       u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
-                  ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+       u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+                  ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
+       u8 flow_pctype = 0;
+       u64 i_set, i_setc;
+
+       if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+               dev_err(&pf->pdev->dev,
+                       "Change of RSS hash input set is not supported when MFP mode is enabled\n");
+               return -EOPNOTSUPP;
+       }
 
        /* RSS does not support anything other than hashing
         * to queues on src and dst IPs and ports
 
        /* RSS does not support anything other than hashing
         * to queues on src and dst IPs and ports
@@ -2480,63 +2730,39 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
                          RXH_L4_B_0_1 | RXH_L4_B_2_3))
                return -EINVAL;
 
                          RXH_L4_B_0_1 | RXH_L4_B_2_3))
                return -EINVAL;
 
-       /* We need at least the IP SRC and DEST fields for hashing */
-       if (!(nfc->data & RXH_IP_SRC) ||
-           !(nfc->data & RXH_IP_DST))
-               return -EINVAL;
-
        switch (nfc->flow_type) {
        case TCP_V4_FLOW:
        switch (nfc->flow_type) {
        case TCP_V4_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-                       break;
-               default:
-                       return -EINVAL;
-               }
+               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+               if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                       hena |=
+                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
                break;
        case TCP_V6_FLOW:
                break;
        case TCP_V6_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-                       break;
-               default:
-                       return -EINVAL;
-               }
+               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+               if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                       hena |=
+                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
                break;
        case UDP_V4_FLOW:
                break;
        case UDP_V4_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-                       break;
-               default:
-                       return -EINVAL;
-               }
+               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+               if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                       hena |=
+                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
                break;
        case UDP_V6_FLOW:
                break;
        case UDP_V6_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-                       break;
-               default:
-                       return -EINVAL;
-               }
+               flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+               if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                       hena |=
+                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+                         BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
                break;
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
                break;
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
@@ -2568,16 +2794,157 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
                return -EINVAL;
        }
 
                return -EINVAL;
        }
 
-       wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
-       wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
-       i40e_flush(hw);
+       if (flow_pctype) {
+               i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
+                                              flow_pctype)) |
+                       ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
+                                              flow_pctype)) << 32);
+               i_set = i40e_get_rss_hash_bits(nfc, i_setc);
+               i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
+                                 (u32)i_set);
+               i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
+                                 (u32)(i_set >> 32));
+               hena |= BIT_ULL(flow_pctype);
+       }
 
 
-       /* Save setting for future output/update */
-       pf->vsi[pf->lan_vsi]->rxnfc = *nfc;
+       i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+       i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+       i40e_flush(hw);
 
        return 0;
 }
 
 
        return 0;
 }
 
+/**
+ * i40e_handle_flex_filter_del - Update FD_INSET upon deletion of flex filter
+ * @vsi: Pointer to the targeted VSI
+ * @flow_type: Type of flow
+ * @flex_mask_bit: Bit index used in INSET register
+ *
+ * This function updates FD_INSET for flex filter if filter (using flex bytes)
+ * count (based on flow_type) reaches zero.
+ **/
+static void i40e_handle_flex_filter_del(struct i40e_vsi *vsi,
+                                       u8 flow_type,
+                                       u64 flex_mask_bit)
+{
+       u8 idx;
+       u64 val = 0;
+       u64 *input_set = NULL;
+       int flex_filter_cnt = 0;
+       u32 fsize = 0, src = 0;
+       struct hlist_node *node2;
+       struct i40e_fdir_filter *rule;
+       struct i40e_pf *pf = vsi->back;
+       bool clean_flex_pit = false;
+       bool update_flex_pit6 = false;
+       bool update_flex_pit7 = false;
+       u32 flex_pit6 = 0, flex_pit7 = 0;
+       u8 pit_idx = I40E_FLEX_PIT_IDX_START_L4;
+       u64 dest_word_l4, dest_word6, dest_word7;
+
+       switch (flow_type & FLOW_TYPE_MASK) {
+       case TCP_V4_FLOW:
+               idx = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+               input_set = &pf->fd_tcp4_input_set;
+               break;
+
+       case UDP_V4_FLOW:
+               idx = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+               input_set = &pf->fd_udp4_input_set;
+               break;
+
+       case IP_USER_FLOW:
+               idx = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+               input_set = &pf->fd_ip4_input_set;
+               pit_idx = I40E_FLEX_PIT_IDX_START_L3;
+               clean_flex_pit = true;
+               break;
+
+       default:
+               return;
+       }
+
+       /* To support TCP/UDP flows simultaneously, update either all
+        * relevant registers or only the ones needed.
+        */
+       if (pit_idx == I40E_FLEX_PIT_IDX_START_L4) {
+               flex_pit6 = i40e_read_rx_ctl(&pf->hw,
+                                            I40E_PRTQF_FLX_PIT(pit_idx));
+               flex_pit7 = i40e_read_rx_ctl(&pf->hw,
+                                            I40E_PRTQF_FLX_PIT(pit_idx + 1));
+               if ((!flex_pit6) || (!flex_pit7))
+                       return;
+
+               dest_word7 = I40E_FLEX_PIT_GET_DST(flex_pit7);
+               dest_word6 = I40E_FLEX_PIT_GET_DST(flex_pit6);
+               /* If dest_word7 or dest_word6 is UNUSED, it is safe to clear
+                * all relevant registers
+                */
+               if ((dest_word6 == I40E_FLEX_DEST_UNUSED) ||
+                   (dest_word7 == I40E_FLEX_DEST_UNUSED)) {
+                       clean_flex_pit = true;
+               } else {
+                       /* Which dest word is used based on 'flex_mask_bit' */
+                       dest_word_l4 = I40E_FLEX_DEST_L4;
+                       if (flex_mask_bit & I40E_FLEX_51_MASK)
+                               dest_word_l4++;
+                        /* Otherwise figure out which register needs update */
+                       if (dest_word6 == dest_word_l4)
+                               update_flex_pit6 = true;
+                       else if (dest_word7 == dest_word_l4)
+                               update_flex_pit7 = true;
+               }
+       }
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               if (i40e_is_flex_filter(rule) && (rule->flow_type == flow_type))
+                       flex_filter_cnt++;
+       }
+
+       /* Do not update value of input set register if flow based flex filter
+        * (flow director filter which utilize bytes from payload as part of
+        * input set) count is non-zero
+        */
+       if (flex_filter_cnt)
+               return;
+
+       /* Read flow specific input set register */
+       val = i40e_read_fd_input_set(pf, idx);
+       if (!(val & flex_mask_bit))
+               return;
+
+       /* Update bit mask as needed */
+       val &= ~flex_mask_bit;
+
+       /* Write flow specific input set register */
+       i40e_write_fd_input_set(pf, idx, val);
+
+       /* Update values of FLX_PIT registers */
+       if (clean_flex_pit) {
+               for (idx = pit_idx; idx < (pit_idx + 3); idx++)
+                       i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(idx), 0);
+
+               /* Time to reset value of input set based on flow-type */
+               if (input_set && (!(*input_set))) {
+                       i40e_write_fd_input_set(pf, idx, *input_set);
+                       *input_set = 0;
+               }
+       } else if (update_flex_pit6) {
+               fsize = I40E_FLEX_PIT_GET_FSIZE(flex_pit6);
+               src = I40E_FLEX_PIT_GET_SRC(flex_pit6);
+       } else if (update_flex_pit7) {
+               fsize = I40E_FLEX_PIT_GET_FSIZE(flex_pit7);
+               src = I40E_FLEX_PIT_GET_SRC(flex_pit7);
+               pit_idx++;
+       }
+
+       if (fsize)
+               i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(pit_idx),
+                                 I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+                                                    fsize, src));
+}
+
 /**
  * i40e_match_fdir_input_set - Match a new filter against an existing one
  * @rule: The filter already added
 /**
  * i40e_match_fdir_input_set - Match a new filter against an existing one
  * @rule: The filter already added
@@ -2591,8 +2958,18 @@ static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
        if ((rule->dst_ip[0] != input->dst_ip[0]) ||
            (rule->src_ip[0] != input->src_ip[0]) ||
            (rule->dst_port != input->dst_port) ||
        if ((rule->dst_ip[0] != input->dst_ip[0]) ||
            (rule->src_ip[0] != input->src_ip[0]) ||
            (rule->dst_port != input->dst_port) ||
-           (rule->src_port != input->src_port))
+           (rule->src_port != input->src_port) ||
+           (rule->flow_type != input->flow_type) ||
+           (rule->ip4_proto != input->ip4_proto) ||
+           (rule->sctp_v_tag != input->sctp_v_tag) ||
+           (rule->q_index != input->q_index))
+               return false;
+
+       /* handle flex_filter, decide based upon pattern equality */
+       if (i40e_is_flex_filter(rule) &&
+           (rule->flex_bytes[3] != input->flex_bytes[3]))
                return false;
                return false;
+
        return true;
 }
 
        return true;
 }
 
@@ -2601,7 +2978,6 @@ static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
  * @vsi: Pointer to the targeted VSI
  * @input: The filter to update or NULL to indicate deletion
  * @sw_idx: Software index to the filter
  * @vsi: Pointer to the targeted VSI
  * @input: The filter to update or NULL to indicate deletion
  * @sw_idx: Software index to the filter
- * @cmd: The command to get or set Rx flow classification rules
  *
  * This function updates (or deletes) a Flow Director entry from
  * the hlist of the corresponding PF
  *
  * This function updates (or deletes) a Flow Director entry from
  * the hlist of the corresponding PF
@@ -2610,38 +2986,47 @@ static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
  **/
 static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
                                          struct i40e_fdir_filter *input,
  **/
 static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
                                          struct i40e_fdir_filter *input,
-                                         u16 sw_idx,
-                                         struct ethtool_rxnfc *cmd)
+                                         u16 sw_idx)
 {
        struct i40e_fdir_filter *rule, *parent;
        struct i40e_pf *pf = vsi->back;
        struct hlist_node *node2;
 {
        struct i40e_fdir_filter *rule, *parent;
        struct i40e_pf *pf = vsi->back;
        struct hlist_node *node2;
-       int err = -EINVAL;
+       int err = -ENOENT;
 
        parent = NULL;
        rule = NULL;
 
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->fdir_filter_list, fdir_node) {
 
        parent = NULL;
        rule = NULL;
 
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->fdir_filter_list, fdir_node) {
-               /* hash found, or no matching entry */
+               /* rule id found, or passed its spot in the list */
                if (rule->fd_id >= sw_idx)
                        break;
                parent = rule;
        }
 
                if (rule->fd_id >= sw_idx)
                        break;
                parent = rule;
        }
 
-       /* if there is an old rule occupying our place remove it */
+       /* is there an old rule occupying our target filter slot? */
        if (rule && (rule->fd_id == sw_idx)) {
        if (rule && (rule->fd_id == sw_idx)) {
-               if (input && !i40e_match_fdir_input_set(rule, input))
-                               err = i40e_add_del_fdir(vsi, rule, false);
-               else if (!input)
-                               err = i40e_add_del_fdir(vsi, rule, false);
+
+               /* if this is an identical rule, don't change anything */
+               if (input && i40e_match_fdir_input_set(rule, input))
+                       return 0;
+
+               /* remove the list entry because we're either deleting
+                * the old rule or we're replacing it with a new rule
+                */
+               err = i40e_add_del_fdir(vsi, rule, false);
                hlist_del(&rule->fdir_node);
                hlist_del(&rule->fdir_node);
-               kfree(rule);
                pf->fdir_pf_active_filters--;
                pf->fdir_pf_active_filters--;
+
+               /* Was it flex filter and deemed for deletion */
+               if (i40e_is_flex_filter(rule) && (!input))
+                       i40e_handle_flex_filter_del(vsi, rule->flow_type,
+                                                   rule->flex_mask_bit);
+               kfree(rule);
        }
 
        /* If no input this was a delete, err should be 0 if a rule was
        }
 
        /* If no input this was a delete, err should be 0 if a rule was
-        * successfully found and removed from the list else -EINVAL
+        * successfully found and removed from the list else -ENOENT
         */
        if (!input)
                return err;
         */
        if (!input)
                return err;
@@ -2649,12 +3034,11 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
        /* initialize node and set software index */
        INIT_HLIST_NODE(&input->fdir_node);
 
        /* initialize node and set software index */
        INIT_HLIST_NODE(&input->fdir_node);
 
-       /* add filter to the list */
+       /* add filter to the ordered list */
        if (parent)
                hlist_add_behind(&input->fdir_node, &parent->fdir_node);
        else
        if (parent)
                hlist_add_behind(&input->fdir_node, &parent->fdir_node);
        else
-               hlist_add_head(&input->fdir_node,
-                              &pf->fdir_filter_list);
+               hlist_add_head(&input->fdir_node, &pf->fdir_filter_list);
 
        /* update counts */
        pf->fdir_pf_active_filters++;
 
        /* update counts */
        pf->fdir_pf_active_filters++;
@@ -2687,21 +3071,469 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
        if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
                return -EBUSY;
 
        if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
                return -EBUSY;
 
-       ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
+       ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location);
 
        i40e_fdir_check_and_reenable(pf);
        return ret;
 }
 
 /**
 
        i40e_fdir_check_and_reenable(pf);
        return ret;
 }
 
 /**
- * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
+ * i40e_program_flex_filter - Program flex filter
+ *
  * @vsi: pointer to the targeted VSI
  * @vsi: pointer to the targeted VSI
- * @cmd: command to get or set RX flow classification rules
+ * @flex_pit: value prepared to be written into flex PIT register
+ * @flex_pit6: flex pit[6] register value
+ * @offset_in_words: offset in packet (unit word)
+ * @src_word_off: user specified offset in words
+ * @dest_word: word location to be used in field vector
+ * @pit_idx: PIT register index
+ * @ort_idx: ORT register index
+ * @ort_val: value to be written in ORT register
  *
  *
- * Add Flow Director filters for a specific flow spec based on their
- * protocol.  Returns 0 if the filters were successfully added.
+ * This function programs relevant FLX_PIT registers, shuffles/updates the
+ * value as needed to support simultaneous flows (such as TCP4/UDP4/IP4).
  **/
  **/
-static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+static int i40e_program_flex_filter(struct i40e_vsi *vsi, u32 flex_pit,
+                                   u32 flex_pit6, u32 offset_in_words,
+                                   u32 src_word_off, u32 dest_word,
+                                   u32 pit_idx, u32 ort_idx, u32 ort_val)
+{
+       struct i40e_pf *pf;
+       u32 curr_src_off = 0, curr_fsize = 0, fsize = 0;
+
+       pf = vsi->back;
+
+       /* Program starting FLX_PIT register */
+       if (!flex_pit6) {
+               i40e_write_rx_ctl(&pf->hw,
+                                 I40E_PRTQF_FLX_PIT(pit_idx), flex_pit);
+       } else {
+               /* This is needed to support simultaneous flex filter for flow
+                * types TCP4 and UDP4.
+                * Program FLX_PIT[6] and FLX_PIT[7] correctly, if needed
+                * program the value read from FLX_PIT6 into register FLX_PIT7,
+                * adjust value for FLX_PIT6 if required.
+                */
+               curr_src_off = I40E_FLEX_PIT_GET_SRC(flex_pit6);
+               curr_fsize = I40E_FLEX_PIT_GET_FSIZE(flex_pit6);
+
+               if (offset_in_words == curr_src_off) {
+                       netif_err(pf, drv, vsi->netdev,
+                                 "Filter exist whose offset w.r.t it's payload matches with the filter being added. This breaks device programming rules, hence unsupported\n");
+                       return -EINVAL;
+               } else if (offset_in_words < curr_src_off) {
+                       /* Program value of FLX_PIT6 into
+                        * FLX_PIT7 and reprogram PIT6 with
+                        * modified value and program PIT8
+                        * as per rule
+                        */
+
+                       /* Adjust FSIZE of FLX_PIT[6]*/
+                       fsize = curr_src_off - offset_in_words;
+
+                       /* Program FLX_PIT[6] */
+                       i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(pit_idx),
+                                         I40E_FLEX_PREP_VAL(dest_word,
+                                                            fsize,
+                                                            src_word_off));
+                       /* Save value of FLX_PIT6, to be written into
+                        * FLX_PIT[7]
+                        */
+                       flex_pit = flex_pit6;
+
+                       /* Make sure src_word_off is correct for last
+                        * FLX_PIT register in this case
+                        */
+                       offset_in_words = curr_src_off + curr_fsize;
+               } else {
+                       /* Make sure src_word_off is correct for last
+                        * FLX_PIT register in this case
+                        */
+                       offset_in_words = offset_in_words +
+                                         I40E_FLEX_PIT_GET_FSIZE(flex_pit);
+               }
+
+               /* Program FLX_PIT[7] */
+               pit_idx++;
+               i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(pit_idx),
+                                 flex_pit);
+               goto program_last_flx_pit;
+       }
+
+       /* Program FLX_PIT register:
+        * FLX_PIT[3] for L3 flow and FLX_PIT[7] for L4 flow
+        */
+       pit_idx++;
+       offset_in_words++;
+       i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(pit_idx),
+                         I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+                                            1, offset_in_words));
+       /* Update before label because in other case, where jump
+        * is taken, values are updated correctly
+        */
+       offset_in_words++;
+
+program_last_flx_pit:
+       /* FLX_PIT[8] in case of L4 flow or FLX_PIT[3] in case of L3 flow */
+       pit_idx++;
+       i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(pit_idx),
+                         I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED,
+                                            1, offset_in_words));
+       i40e_write_rx_ctl(&pf->hw, I40E_GLQF_ORT(ort_idx), ort_val);
+
+       return 0;
+}
+
+/**
+ * i40e_validate_flex_filter_params - Validate flex filter params (user-def)
+ * @vsi: pointer to the targeted VSI
+ * @input: pointer to filter
+ * @offset: offset in packet
+ * @header_len: Header len based on flow-type
+ * @mask: inset mask for flex filter
+ * @flow_based_cnt: flex filter count based on flow-type
+ * @word_offset_in_payload: offset in unit words w.r.t payload
+ *
+ * This function validates 'offset' (mask) to make sure it satisfies device
+ * programming rule.
+ **/
+static int i40e_validate_flex_filter_params(struct i40e_vsi *vsi,
+                                           struct i40e_fdir_filter *input,
+                                           u16 offset, u16 header_len,
+                                           u64 *mask, u16 *flow_based_cnt,
+                                           u32 word_offset_in_payload)
+{
+       struct i40e_pf *pf;
+       struct hlist_node *node2;
+       struct i40e_fdir_filter *rule;
+       u16 existing_mask, specified_mask;
+
+       pf = vsi->back;
+
+       /* 'offset' needs to be within 0...480 bytes */
+       if (offset >= I40E_MAX_PARSE_BYTE) {
+               netif_err(pf, drv, vsi->netdev, "Max valid offset in unit word is %u, user passed %u\n",
+                         I40E_MAX_PARSE_BYTE, offset);
+               return -EINVAL;
+       }
+
+       /* 'offset' shall not be somewhere within header (L2/L3/L4) */
+       if (offset < header_len) {
+               netif_err(pf, drv, vsi->netdev, "Specified offset (%u) is referring to bytes in headers (bytes 0-%u), it should be somewhere in payload.\n",
+                         offset, header_len);
+               return -EINVAL;
+       }
+
+       /* Check for word boundary */
+       if (offset & 0x1) {
+               netif_err(pf, drv, vsi->netdev, "User specified mask address %u rounded down to word boundary %lu\n",
+                         offset, sizeof(u16));
+               return -EINVAL;
+       }
+
+       if (word_offset_in_payload >= I40E_MAX_SRC_WORD_OFFSET) {
+               netif_err(pf, drv, vsi->netdev, "Max. allowed bytes in payload are %u, but user specified offset %u\n",
+                         (I40E_MAX_SRC_WORD_OFFSET << 1),
+                         word_offset_in_payload);
+               return -EINVAL;
+       }
+
+       /* Does filter with flex byte for given flow-type exist and if offset
+        * (aka flex mask) of that existing filter is different, it is
+        * considered change in input set mask.
+        * Since we are allowing only one mask (aka offset) for each flow type,
+        * flag an error and fail the call.
+        */
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               if (!i40e_is_flex_filter(rule))
+                       continue;
+               existing_mask = ~(be16_to_cpu(rule->flex_mask[3]));
+               specified_mask = ~(be16_to_cpu(input->flex_mask[3]));
+               if (rule->flow_type == input->flow_type) {
+                       if (existing_mask != specified_mask) {
+                               /* This is like change in INPUT set mask
+                                * (aka 'offset') when used flex payload for
+                                * a given flow-type.
+                                */
+                               netif_err(pf, drv, vsi->netdev,
+                                         "Previous flex filter(ID: %u) exists for flow-type %u whose flex mask (aka 'offset'): %u is different from current mask :%u specified. Please delete previous flex filter and try again.\n",
+                                         input->fd_id,
+                                         input->flow_type & FLOW_TYPE_MASK,
+                                         existing_mask, specified_mask);
+                               return -EINVAL;
+                       }
+                       /* Keep track of flex filter cnt per flow */
+                       (*flow_based_cnt)++;
+                       *mask = rule->flex_mask_bit;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_handle_input_set - Detect and handle input set changes
+ * @vsi: pointer to the targeted VSI
+ * @fsp: pointer to RX flow classification spec
+ * @input: pointer to filter
+ *
+ * Reads register, detect change in input set based on existing register
+ * value and what user has passed. Update input set mask register if needed.
+ **/
+static int i40e_handle_input_set(struct i40e_vsi *vsi,
+                                struct ethtool_rx_flow_spec *fsp,
+                                struct i40e_fdir_filter *input)
+{
+       u8 idx;
+       u64 val = 0;
+       int ret = 0;
+       u16 flow_based_filter_cnt = 0;
+       u32 fsize = 1;
+       u32 flex_pit6 = 0;
+       u64 *input_set = NULL;
+       u16 offset = 0, header_len = 0;
+       bool inset_mask_change = false;
+       u16 flex_flow_based_filter_cnt = 0;
+       u32 dest_word = I40E_FLEX_DEST_L4;
+       u32 ort_idx = I40E_L4_GLQF_ORT_IDX;
+       u32 ort_val = I40E_L4_GLQF_ORT_VAL;
+       u64 flex_mask_bit = I40E_FLEX_50_MASK;
+       u32 pit_idx = I40E_FLEX_PIT_IDX_START_L4;
+       u32 src_word_off = 0, offset_in_words = 0, flex_pit = 0;
+       struct i40e_pf *pf;
+       u32 dest_ip_addr = 0;
+       struct hlist_node *node2;
+       struct i40e_cloud_filter *cloud_rule;
+
+       if (unlikely(!vsi))
+               return -EINVAL;
+
+       pf = vsi->back;
+       switch (fsp->flow_type & FLOW_TYPE_MASK) {
+       case TCP_V4_FLOW:
+               idx = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+               header_len = I40E_TCPIP_DUMMY_PACKET_LEN;
+               input_set = &pf->fd_tcp4_input_set;
+               flow_based_filter_cnt = pf->fd_tcp4_filter_cnt;
+               break;
+
+       case UDP_V4_FLOW:
+               idx = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+               header_len = I40E_UDPIP_DUMMY_PACKET_LEN;
+               input_set = &pf->fd_udp4_input_set;
+               flow_based_filter_cnt = pf->fd_udp4_filter_cnt;
+               break;
+
+       case IP_USER_FLOW:
+               idx = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+               header_len = I40E_IP_DUMMY_PACKET_LEN;
+               dest_word = I40E_FLEX_DEST_L3;
+               pit_idx = I40E_FLEX_PIT_IDX_START_L3;
+               flex_mask_bit = I40E_FLEX_53_MASK;
+               input_set = &pf->fd_ip4_input_set;
+               ort_idx = I40E_L3_GLQF_ORT_IDX;
+               ort_val = I40E_L3_GLQF_ORT_VAL;
+               flow_based_filter_cnt = pf->fd_ip4_filter_cnt;
+               break;
+
+       /* Extend this in future for SCTP4, IPV6, IPV6(TCP/UDP/SCTP), and L2 */
+       default:
+               /* for all other flow types */
+               return 0;
+       }
+
+       /* Always read current value from device */
+       val = i40e_read_fd_input_set(pf, idx);
+
+       /* Save once (default value read from device)
+        * It is used upon ntuple off/on and when all filters are deleted
+        * for a given flow.
+        */
+       if (input_set && (!(*input_set)))
+               *input_set = val;
+
+       if (!i40e_is_flex_filter(input))
+               goto skip_flex_payload;
+
+       /* Treating mask as 'offset' in packet */
+       offset = ~(be16_to_cpu(input->flex_mask[3]));
+
+       /* zero based word index relative to start of payload */
+       src_word_off = (offset - header_len) >> 1;
+       offset_in_words = src_word_off;
+
+       /* Validate user-def params, specifically mask */
+       ret = i40e_validate_flex_filter_params(vsi, input, offset, header_len,
+                                              &flex_mask_bit,
+                                              &flex_flow_based_filter_cnt,
+                                              src_word_off);
+       if (ret)
+               return ret;
+
+       /* zero based word index relative to start of payload */
+       src_word_off = (offset - header_len) >> 1;
+       offset_in_words = src_word_off;
+
+       /* To avoid reading for L3 flows */
+       if (pit_idx == I40E_FLEX_PIT_IDX_START_L4)
+               flex_pit6 = i40e_read_rx_ctl(&pf->hw,
+                                            I40E_PRTQF_FLX_PIT(pit_idx));
+
+       /* Only applicable for L4 flow and only when FLX_PIT6 is valid */
+       if (flex_pit6 && (pit_idx == I40E_FLEX_PIT_IDX_START_L4)) {
+               if (flex_flow_based_filter_cnt) {
+                       if (flex_mask_bit == I40E_FLEX_51_MASK)
+                               /* Use next dest-word in field vector to be in
+                                * sync with flex_mask_bit
+                                */
+                               dest_word++;
+               } else {
+                       /* Use next dest-word in field vector */
+                       dest_word++;
+                       /* likewise update mask bit */
+                       flex_mask_bit = I40E_FLEX_51_MASK;
+               }
+       }
+
+       /* Store which FLEX WORD being used. Useful during delete filter */
+       input->flex_mask_bit = flex_mask_bit;
+
+       /* Prep value for FLX_PIT register */
+       flex_pit = I40E_FLEX_PREP_VAL(dest_word, fsize, src_word_off);
+
+       /* Do we have cloud filter which has at least one destination
+        * IP address (applicable only in case of tunnel) as part of
+        * input set? This is unsupported configuration in the context of
+        * filter with flexible payload
+        */
+       hlist_for_each_entry_safe(cloud_rule, node2,
+                                 &pf->cloud_filter_list, cloud_node) {
+               dest_ip_addr = be32_to_cpu(cloud_rule->inner_ip[0]);
+               if (dest_ip_addr) {
+                       netif_err(pf, drv, vsi->netdev,
+                                 "Previous cloud filter exist with at least one destination IP address %pI4 as part of input set. Please delete that cloud filter (ID: %u) and try again\n",
+                                 &dest_ip_addr, cloud_rule->id);
+                       return -EINVAL;
+               }
+       }
+
+       /* Set correponsing bit in input set mask register, and mark change */
+       if (!(val & flex_mask_bit)) {
+               inset_mask_change = true;
+               val |= flex_mask_bit;
+       }
+
+skip_flex_payload:
+       /* Default input set (TCP/UDP/SCTP) contains following
+        * fields: srcip + dest ip + src port + dest port
+        * For SCTP, there is one extra field, "verification tag"
+        */
+       if (val & I40E_L3_SRC_MASK) {
+               if (!fsp->h_u.tcp_ip4_spec.ip4src) {
+                       val &= ~I40E_L3_SRC_MASK;
+                       inset_mask_change = true;
+               }
+       } else {
+               if (fsp->h_u.tcp_ip4_spec.ip4src) {
+                       val |= I40E_L3_SRC_MASK;
+                       inset_mask_change = true;
+               }
+       }
+       if (val & I40E_L3_DST_MASK) {
+               if (!fsp->h_u.tcp_ip4_spec.ip4dst) {
+                       val &= ~I40E_L3_DST_MASK;
+                       inset_mask_change = true;
+               }
+       } else {
+               if (fsp->h_u.tcp_ip4_spec.ip4dst) {
+                       val |= I40E_L3_DST_MASK;
+                       inset_mask_change = true;
+               }
+       }
+       if (val & I40E_L4_SRC_MASK) {
+               if (!fsp->h_u.tcp_ip4_spec.psrc) {
+                       val &= ~I40E_L4_SRC_MASK;
+                       inset_mask_change = true;
+               }
+       } else {
+               if (fsp->h_u.tcp_ip4_spec.psrc) {
+                       val |= I40E_L4_SRC_MASK;
+                       inset_mask_change = true;
+               }
+       }
+       if (val & I40E_L4_DST_MASK) {
+               if (!fsp->h_u.tcp_ip4_spec.pdst) {
+                       val &= ~I40E_L4_DST_MASK;
+                       inset_mask_change = true;
+               }
+       } else {
+               if (fsp->h_u.tcp_ip4_spec.pdst) {
+                       val |= I40E_L4_DST_MASK;
+                       inset_mask_change = true;
+               }
+       }
+
+       /* Handle the scenario where previous input set mask for given
+        * flow-type indicates usage of flex payload, whereas current
+        * filter being added is not using flexible payload.
+        * In another words, changing from tuple which had flex bytes as part of
+        * tuple to tuples where no flex bytes are used.
+        *
+        * Extend following check as more FLEX_5x_MASK are used.
+        */
+       if (val & (I40E_FLEX_50_MASK | I40E_FLEX_51_MASK |
+           I40E_FLEX_52_MASK | I40E_FLEX_53_MASK)) {
+               if (!i40e_is_flex_filter(input))
+                       inset_mask_change = true;
+       }
+
+       if (!inset_mask_change)
+               return 0;
+
+       if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+               netif_err(pf, drv, vsi->netdev, "Change of input set is not supported when MFP mode is enabled\n");
+               return -EOPNOTSUPP;
+       }
+       if (flex_flow_based_filter_cnt || flow_based_filter_cnt) {
+               netif_err(pf, drv, vsi->netdev, "Change of input set is not supported when there are existing filters(%u) for specified flow-type: %u. Please delete them and re-try\n",
+                         (flex_flow_based_filter_cnt) ?
+                         flex_flow_based_filter_cnt : flow_based_filter_cnt,
+                         fsp->flow_type & FLOW_TYPE_MASK);
+               return -EOPNOTSUPP;
+       }
+
+       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+               netif_info(pf, drv, vsi->netdev, "FD_INSET mask is changing to 0x%016llx\n",
+                          val);
+
+       /* Program FLX_PIT registers to support flex filter */
+       if (flex_pit) {
+               ret = i40e_program_flex_filter(vsi, flex_pit, flex_pit6,
+                                              offset_in_words, src_word_off,
+                                              dest_word, pit_idx,
+                                              ort_idx, ort_val);
+               if (ret)
+                       return ret;
+       }
+
+       /* Update input mask register since input set mask changed */
+       i40e_write_fd_input_set(pf, idx, val);
+
+       netif_info(pf, drv, vsi->netdev, "Input set mask has been changed. Please note that, it affects specified interface and any other related/derived interfaces\n");
+
+       return 0;
+}
+
+/**
+ * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
+ * @vsi: pointer to the targeted VSI
+ * @cmd: command to get or set RX flow classification rules
+ *
+ * Add Flow Director filters for a specific flow spec based on their
+ * protocol.  Returns 0 if the filters were successfully added.
+ **/
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
                                 struct ethtool_rxnfc *cmd)
 {
        struct ethtool_rx_flow_spec *fsp;
                                 struct ethtool_rxnfc *cmd)
 {
        struct ethtool_rx_flow_spec *fsp;
@@ -2712,7 +3544,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 
        if (!vsi)
                return -EINVAL;
 
        if (!vsi)
                return -EINVAL;
-
        pf = vsi->back;
 
        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
        pf = vsi->back;
 
        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
@@ -2729,8 +3560,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
                return -EBUSY;
 
        fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
                return -EBUSY;
 
        fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
-       pf = vsi->back;
-
        if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
                              pf->hw.func_caps.fd_filters_guaranteed)) {
                return -EINVAL;
        if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
                              pf->hw.func_caps.fd_filters_guaranteed)) {
                return -EINVAL;
@@ -2741,18 +3570,16 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
                return -EINVAL;
 
        input = kzalloc(sizeof(*input), GFP_KERNEL);
                return -EINVAL;
 
        input = kzalloc(sizeof(*input), GFP_KERNEL);
-
        if (!input)
                return -ENOMEM;
 
        if (!input)
                return -ENOMEM;
 
-       input->fd_id = fsp->location;
-
        if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
                input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
        else
                input->dest_ctl =
                             I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
 
        if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
                input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
        else
                input->dest_ctl =
                             I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
 
+       input->fd_id = fsp->location;
        input->q_index = fsp->ring_cookie;
        input->flex_off = 0;
        input->pctype = 0;
        input->q_index = fsp->ring_cookie;
        input->flex_off = 0;
        input->pctype = 0;
@@ -2770,105 +3597,164 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
        input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
        input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
 
        input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
        input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
 
-       if (ntohl(fsp->m_ext.data[1])) {
-               if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
-                       netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+       /* Deciding factor, whether it is flex filter or filter for VF
+        * is value of 'mask' (user-def N m)
+        * If mask is specified then treat it as flex filter
+        * otherwise this filter is for VF.
+        * This distinction is needed due to overloading usage of
+        * user-def field.
+        */
+       if (fsp->m_ext.data[1] == cpu_to_be32(~0)) {
+               vf_id = be32_to_cpu(fsp->h_ext.data[1]);
+               if (vf_id >= pf->num_alloc_vfs) {
+                       netif_info(pf, drv, vsi->netdev,
+                                  "Invalid VF id %d\n", vf_id);
                        goto free_input;
                }
                        goto free_input;
                }
-               vf_id = ntohl(fsp->h_ext.data[1]);
                /* Find vsi id from vf id and override dest vsi */
                input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
                if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
                /* Find vsi id from vf id and override dest vsi */
                input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
                if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
-                       netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+                       netif_info(pf, drv, vsi->netdev,
+                                  "Invalid queue id %d for VF %d\n",
+                                  input->q_index, vf_id);
                        goto free_input;
                }
                        goto free_input;
                }
+       } else {
+               /* flex filter is supported only for main VSI */
+               if (vsi->type != I40E_VSI_MAIN) {
+                       netif_err(pf, drv, vsi->netdev,
+                                 "Unsupported interface type for adding filter using user-defs\n");
+                       goto free_input;
+               }
+
+               /* initialized to known values */
+               input->flex_bytes[2] = 0;
+               input->flex_bytes[3] = 0;
+               input->flex_mask[2] = 0;
+               input->flex_mask[3] = 0;
+               if ((fsp->h_ext.data[0] == cpu_to_be32(0x0)) &&
+                   (fsp->h_ext.data[1] != cpu_to_be32(~0))) {
+                       input->flex_bytes[2] = fsp->h_ext.data[1];
+                       if (input->flex_bytes[2]) {
+                               netif_err(pf, drv, vsi->netdev,
+                                         "Only one word is supported for flex filter\n");
+                               goto free_input;
+                       }
+                       /* Store only relevant section of user-defs */
+                       input->flex_bytes[3] = fsp->h_ext.data[1] >> 16;
+                       input->flex_mask[3] = fsp->m_ext.data[1] >> 16;
+               }
+       }
+
+       /*  Detect and handle change for input set mask */
+       ret = i40e_handle_input_set(vsi, fsp, input);
+       if (ret) {
+               netif_err(pf, drv, vsi->netdev, "Unable to handle change in input set mask\n");
+               goto free_input;
        }
 
        ret = i40e_add_del_fdir(vsi, input, true);
 free_input:
        if (ret)
                kfree(input);
        }
 
        ret = i40e_add_del_fdir(vsi, input, true);
 free_input:
        if (ret)
                kfree(input);
-       else
-               i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+       else {
+               (void)i40e_del_cloud_filter_ethtool(pf, cmd);
+               i40e_update_ethtool_fdir_entry(vsi, input, fsp->location);
+       }
 
        return ret;
 }
 
 
        return ret;
 }
 
-#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
 /**
 /**
- * i40e_vxlan_filter_mask2flags- Convert Vxlan filter details to filter type
+ * i40e_cloud_filter_mask2flags- Convert cloud filter details to filter type
+ * @pf: pointer to the physical function struct
  * @fsp: RX flow classification rules
  * @flags: Resultant combination of all the fields to decide the tuple
  *
  * @fsp: RX flow classification rules
  * @flags: Resultant combination of all the fields to decide the tuple
  *
+ * The general trick in setting these flags is that if the mask field for
+ * a value is non-zero, then the field itself was set to something, so we
+ * use this to tell us what has been selected.
+ *
  * Returns 0 if a valid filter type was identified.
  **/
  * Returns 0 if a valid filter type was identified.
  **/
-static inline i40e_status i40e_vxlan_filter_mask2flags(
+static int i40e_cloud_filter_mask2flags(struct i40e_pf *pf,
                                        struct ethtool_rx_flow_spec *fsp,
                                        u8 *flags)
 {
                                        struct ethtool_rx_flow_spec *fsp,
                                        u8 *flags)
 {
-       static const u8 mac_broadcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
-       static const u8 mac_zero[] = { 0, 0, 0, 0, 0, 0 };
+       u32 tenant_id;
        u8 i = 0;
        u8 i = 0;
-       u16 vlan_tci = fsp->m_ext.vlan_tci;
-       u32 vxlan_id = 0;
 
        *flags = 0;
 
 
        *flags = 0;
 
-       if (ntohl(fsp->h_ext.data[0] != 0xffffffff))
-               vxlan_id = ntohl(fsp->m_ext.data[0]);
-
-       switch (fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+       switch (fsp->flow_type & FLOW_TYPE_MASK) {
        case ETHER_FLOW:
        case ETHER_FLOW:
-               if (!memcmp(fsp->m_u.ether_spec.h_dest, mac_broadcast,
-                   sizeof(mac_broadcast)))
+               /* use is_broadcast and is_zero to check for all 0xf or 0 */
+               if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
                        i |= I40E_CLOUD_FIELD_OMAC;
                        i |= I40E_CLOUD_FIELD_OMAC;
-               else if (!memcmp(fsp->m_u.ether_spec.h_dest, mac_zero,
-                   sizeof(mac_broadcast)))
+               } else if (is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
                        i &= ~I40E_CLOUD_FIELD_OMAC;
                        i &= ~I40E_CLOUD_FIELD_OMAC;
-               else
+               } else {
+                       dev_info(&pf->pdev->dev, "Bad ether dest mask %pM\n",
+                                fsp->m_u.ether_spec.h_dest);
                        return I40E_ERR_CONFIG;
                        return I40E_ERR_CONFIG;
+               }
 
 
-               if (!memcmp(fsp->m_u.ether_spec.h_source, mac_broadcast,
-                   sizeof(mac_broadcast)))
+               if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
                        i |= I40E_CLOUD_FIELD_IMAC;
                        i |= I40E_CLOUD_FIELD_IMAC;
-               else if (!memcmp(fsp->m_u.ether_spec.h_source, mac_zero,
-                   sizeof(mac_broadcast)))
+               } else if (is_zero_ether_addr(fsp->m_u.ether_spec.h_source)) {
                        i &= ~I40E_CLOUD_FIELD_IMAC;
                        i &= ~I40E_CLOUD_FIELD_IMAC;
-               else
+               } else {
+                       dev_info(&pf->pdev->dev, "Bad ether source mask %pM\n",
+                                fsp->m_u.ether_spec.h_source);
                        return I40E_ERR_CONFIG;
                        return I40E_ERR_CONFIG;
+               }
                break;
 
        case IP_USER_FLOW:
                break;
 
        case IP_USER_FLOW:
-               if (fsp->m_u.usr_ip4_spec.ip4dst == 0xffffffff)
+               if (fsp->m_u.usr_ip4_spec.ip4dst == cpu_to_be32(0xffffffff)) {
                        i |= I40E_CLOUD_FIELD_IIP;
                        i |= I40E_CLOUD_FIELD_IIP;
-               else if (fsp->m_u.usr_ip4_spec.ip4dst == 0)
+               } else if (!fsp->m_u.usr_ip4_spec.ip4dst) {
                        i &= ~I40E_CLOUD_FIELD_IIP;
                        i &= ~I40E_CLOUD_FIELD_IIP;
-               else
+               } else {
+                       dev_info(&pf->pdev->dev, "Bad ip dst mask 0x%08x\n",
+                                be32_to_cpu(fsp->m_u.usr_ip4_spec.ip4dst));
                        return I40E_ERR_CONFIG;
                        return I40E_ERR_CONFIG;
+               }
                break;
                break;
+
        default:
                return I40E_ERR_CONFIG;
        }
 
        default:
                return I40E_ERR_CONFIG;
        }
 
-       switch (vlan_tci & 0x7fff) {
-       case 0x7fff:
+       switch (be16_to_cpu(fsp->m_ext.vlan_tci)) {
+       case 0xffff:
+               if (fsp->h_ext.vlan_tci & cpu_to_be16(~0x7fff)) {
+                       dev_info(&pf->pdev->dev, "Bad vlan %u\n",
+                                be16_to_cpu(fsp->h_ext.vlan_tci));
+                       return I40E_ERR_CONFIG;
+               }
                i |= I40E_CLOUD_FIELD_IVLAN;
                break;
        case 0:
                i &= ~I40E_CLOUD_FIELD_IVLAN;
                break;
        default:
                i |= I40E_CLOUD_FIELD_IVLAN;
                break;
        case 0:
                i &= ~I40E_CLOUD_FIELD_IVLAN;
                break;
        default:
+               dev_info(&pf->pdev->dev, "Bad vlan mask %u\n",
+                        be16_to_cpu(fsp->m_ext.vlan_tci));
                return I40E_ERR_CONFIG;
        }
 
                return I40E_ERR_CONFIG;
        }
 
-       switch (vxlan_id & 0xffffff) {
-       case 0xffffff:
+       /* we already know that the user-def field was set, that's how we
+        * got here, so we don't need to check that.  However, we need to
+        * see if 0xffffffff or a non-zero three-byte tenant id was set.
+        */
+       tenant_id = be32_to_cpu(fsp->h_ext.data[0]);
+       if (tenant_id && tenant_id <= 0xffffff) {
                i |= I40E_CLOUD_FIELD_TEN_ID;
                i |= I40E_CLOUD_FIELD_TEN_ID;
-               break;
-       case 0:
+       } else if (tenant_id == 0xffffffff || tenant_id == 0) {
                i &= ~I40E_CLOUD_FIELD_TEN_ID;
                i &= ~I40E_CLOUD_FIELD_TEN_ID;
-               break;
-       default:
+       } else {
+               dev_info(&pf->pdev->dev, "Bad tenant/vxlan id %d\n", tenant_id);
                return I40E_ERR_CONFIG;
        }
 
                return I40E_ERR_CONFIG;
        }
 
@@ -2877,29 +3763,35 @@ static inline i40e_status i40e_vxlan_filter_mask2flags(
 }
 
 /**
 }
 
 /**
- * i40e_add_vxlan_filter_ethtool - Add vxlan filter
+ * i40e_add_cloud_filter_ethtool - Add cloud filter
  * @pf: pointer to the physical function struct
  * @pf: pointer to the physical function struct
- * @fsp: RX flow classification rules
+ * @cmd: The command to get or set Rx flow classification rules
  *
  *
- * Add vxlan filter for a specific flow spec.
+ * Add cloud filter for a specific flow spec.
  * Returns 0 if the filter were successfully added.
  **/
  * Returns 0 if the filter were successfully added.
  **/
-static int i40e_add_vxlan_filter_ethtool(struct i40e_pf *pf,
-                                        struct ethtool_rx_flow_spec *fsp)
+static int i40e_add_cloud_filter_ethtool(struct i40e_pf *pf,
+                                        struct ethtool_rxnfc *cmd)
 {
 {
-       struct i40e_vsi *dst_vsi, *vsi = NULL;
-       struct i40e_cloud_filter *rule, *parent, *pfilter = NULL;
+       struct i40e_cloud_filter *rule, *parent, *filter = NULL;
+       struct ethtool_rx_flow_spec *fsp;
        struct hlist_node *node2;
        struct hlist_node *node2;
+       struct i40e_vsi *dst_vsi;
        u16 vf_id, vsi_idx;
        u8 flags = 0;
        int ret;
 
        u16 vf_id, vsi_idx;
        u8 flags = 0;
        int ret;
 
-       if (ntohl(fsp->m_ext.data[1])) {
-               vf_id = (u16)ntohl(fsp->h_ext.data[1]);
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+           test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+               return -EBUSY;
+
+       fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+       if (fsp->m_ext.data[1] == cpu_to_be32(0xffffffff)) {
+               vf_id = (u16)be32_to_cpu(fsp->h_ext.data[1]);
                /* if Vf id >= num_vfs, program a filter for PF Main VSI */
                if (vf_id >= pf->num_alloc_vfs) {
                        dev_info(&pf->pdev->dev,
                /* if Vf id >= num_vfs, program a filter for PF Main VSI */
                if (vf_id >= pf->num_alloc_vfs) {
                        dev_info(&pf->pdev->dev,
-                                "Out of range vf_id, adding the cloud filter for Main VSI %d\n",
+                                "Out of range vf_id %d, adding the cloud filter to PF\n",
                                 vf_id);
                        dst_vsi = pf->vsi[pf->lan_vsi];
                } else {
                                 vf_id);
                        dst_vsi = pf->vsi[pf->lan_vsi];
                } else {
@@ -2911,163 +3803,170 @@ static int i40e_add_vxlan_filter_ethtool(struct i40e_pf *pf,
                                return -EINVAL;
                        }
                }
                                return -EINVAL;
                        }
                }
-       } else {
+       } else if (!fsp->m_ext.data[1]) {
                dst_vsi = pf->vsi[pf->lan_vsi];
                dst_vsi = pf->vsi[pf->lan_vsi];
+       } else {
+               return -EINVAL;
        }
 
        }
 
-       if (fsp->ring_cookie >= dst_vsi->num_queue_pairs) {
+       if (fsp->ring_cookie == ~0) {
+               dev_info(&pf->pdev->dev, "No drop option for cloud filters\n");
+               return -EINVAL;
+       } else if (fsp->ring_cookie >= dst_vsi->num_queue_pairs) {
                dev_info(&pf->pdev->dev,
                         "Invalid queue_id %llu\n", fsp->ring_cookie);
                return -EINVAL;
        }
 
                dev_info(&pf->pdev->dev,
                         "Invalid queue_id %llu\n", fsp->ring_cookie);
                return -EINVAL;
        }
 
-       ret = i40e_vxlan_filter_mask2flags(fsp, &flags);
+       ret = i40e_cloud_filter_mask2flags(pf, fsp, &flags);
        if (ret || !flags) {
        if (ret || !flags) {
-               dev_info(&pf->pdev->dev,
-                        "Invalid mask config, ret = %d, flags = %d\n",
-                        ret, flags);
+               dev_info(&pf->pdev->dev, "Invalid mask config, flags = %d\n",
+                        flags);
                return -EINVAL;
        }
 
                return -EINVAL;
        }
 
+       /* if filter exists with same id, delete the old one */
        parent = NULL;
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->cloud_filter_list, cloud_node) {
                /* filter exists with the id */
        parent = NULL;
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->cloud_filter_list, cloud_node) {
                /* filter exists with the id */
-               if (rule->id >= fsp->location) {
-                       pfilter = rule;
+               if (rule->id == fsp->location)
+                       filter = rule;
+
+               /* bail out if we've passed the likely location in the list */
+               if (rule->id >= fsp->location)
                        break;
                        break;
-               }
+
+               /* track where we left off */
                parent = rule;
        }
                parent = rule;
        }
-       /* if filter exists with same id, delete old */
-       if (pfilter && (pfilter->id == fsp->location)) {
-               vsi = i40e_find_vsi_from_id(pf, pfilter->vsi_id);
-               if (!vsi) {
-                       dev_info(&pf->pdev->dev, "no vsi with vsi_id %d\n",
-                               pfilter->vsi_id);
-                       return -ENOSYS;
-               }
-               ret = i40e_add_del_cloud_filter(pf, pfilter, vsi, false);
-               if (ret) {
+       if (filter && (filter->id == fsp->location)) {
+               /* found it in the cloud list, so remove it */
+               ret = i40e_add_del_cloud_filter(pf, filter, false);
+               if (ret && (pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)) {
                        dev_info(&pf->pdev->dev,
                        dev_info(&pf->pdev->dev,
-                                "fail to delete old cloud filter, err = %d\n",
-                                ret);
-                       return -ENOSYS;
+                                "fail to delete old cloud filter, err %s, aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+                       return i40e_aq_rc_to_posix(ret,
+                                                  pf->hw.aq.asq_last_status);
                }
                }
-               hlist_del(&pfilter->cloud_node);
-               kfree(pfilter);
+               hlist_del(&filter->cloud_node);
+               kfree(filter);
                pf->num_cloud_filters--;
                pf->num_cloud_filters--;
+       } else {
+               /* not in the cloud list, so check the PF's fdir list */
+               (void)i40e_del_fdir_entry(pf->vsi[pf->lan_vsi], cmd);
        }
 
        }
 
-       pfilter = kzalloc(sizeof(*pfilter), GFP_KERNEL);
+       /* Presence of cloud filter and flex filter is mutually exclusive */
+       if (pf->fd_flex_filter_cnt) {
+               dev_err(&pf->pdev->dev,
+                       "Filters(%d) using user-def (flexible payload) are present. Please delete them and try again\n",
+                       pf->fd_flex_filter_cnt);
+               return I40E_NOT_SUPPORTED;
+       }
 
 
-       if (!pfilter)
+       filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+       if (!filter)
                return -ENOMEM;
 
                return -ENOMEM;
 
-       pfilter->id = fsp->location;
-       pfilter->vsi_id = dst_vsi->id;
-       switch (fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+       filter->id = fsp->location;
+       filter->seid = dst_vsi->seid;
+
+       switch (fsp->flow_type & FLOW_TYPE_MASK) {
        case ETHER_FLOW:
        case ETHER_FLOW:
-               ether_addr_copy(pfilter->outer_mac,
-                       fsp->h_u.ether_spec.h_dest);
-               ether_addr_copy(pfilter->inner_mac,
-                       fsp->h_u.ether_spec.h_source);
+               ether_addr_copy(filter->outer_mac,
+                               fsp->h_u.ether_spec.h_dest);
+               ether_addr_copy(filter->inner_mac,
+                               fsp->h_u.ether_spec.h_source);
                break;
                break;
+
        case IP_USER_FLOW:
        case IP_USER_FLOW:
-               pfilter->inner_ip[0] = fsp->h_u.usr_ip4_spec.ip4dst;
+               if (flags & I40E_CLOUD_FIELD_TEN_ID) {
+                       dev_info(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
+                       return I40E_ERR_CONFIG;
+               }
+               filter->inner_ip[0] = fsp->h_u.usr_ip4_spec.ip4dst;
                break;
                break;
+
        default:
        default:
-               dev_info(&pf->pdev->dev, "unknown flow type\n");
-               kfree(pfilter);
+               dev_info(&pf->pdev->dev, "unknown flow type 0x%x\n",
+                        (fsp->flow_type & FLOW_TYPE_MASK));
+               kfree(filter);
                return I40E_ERR_CONFIG;
        }
 
                return I40E_ERR_CONFIG;
        }
 
-       pfilter->inner_vlan = fsp->h_ext.vlan_tci;
-
-       if (ntohl(fsp->h_ext.data[0] != 0xffffffff))
-               pfilter->tenant_id = ntohl(fsp->h_ext.data[0]);
-       /* else this is a  L3 VEB filter for non-tunneled packets or a tuple
-        * without vni.
-        */
-       pfilter->queue_id = fsp->ring_cookie;
-       pfilter->tunnel_type = I40E_CLOUD_TNL_TYPE_XVLAN;
-       pfilter->flags = flags;
+       if (be32_to_cpu(fsp->h_ext.data[0]) != 0xffffffff) {
+               filter->tenant_id = be32_to_cpu(fsp->h_ext.data[0]);
+               filter->tunnel_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+       } else {
+               /* L3 VEB filter for non-tunneled packets or a tuple w/o vni  */
+               filter->tenant_id = 0;
+               filter->tunnel_type = I40E_CLOUD_TNL_TYPE_NONE;
+       }
+       filter->queue_id = fsp->ring_cookie;
+       filter->flags = flags;
+       filter->inner_vlan = fsp->h_ext.vlan_tci;
 
 
-       ret = i40e_add_del_cloud_filter(pf, pfilter, dst_vsi, true);
+       ret = i40e_add_del_cloud_filter(pf, filter, true);
        if (ret) {
        if (ret) {
-               kfree(pfilter);
+               kfree(filter);
                dev_info(&pf->pdev->dev,
                         "fail to add cloud filter, err = %d\n", ret);
                dev_info(&pf->pdev->dev,
                         "fail to add cloud filter, err = %d\n", ret);
-               return -ENOSYS;
+               return i40e_aq_rc_to_posix(ret, pf->hw.aq.asq_last_status);
        }
 
        }
 
-       INIT_HLIST_NODE(&pfilter->cloud_node);
-       /* add filter to the list */
-
+       /* add filter to the ordered list */
+       INIT_HLIST_NODE(&filter->cloud_node);
        if (parent)
        if (parent)
-               hlist_add_behind(&pfilter->cloud_node, &parent->cloud_node);
+               hlist_add_behind(&filter->cloud_node, &parent->cloud_node);
        else
        else
-               hlist_add_head(&pfilter->cloud_node,
-                                      &pf->cloud_filter_list);
+               hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
        pf->num_cloud_filters++;
 
        return 0;
 }
 
 /**
        pf->num_cloud_filters++;
 
        return 0;
 }
 
 /**
- * i40e_del_vxlan_filter_ethtool - del vxlan filter
+ * i40e_del_cloud_filter_ethtool - del vxlan filter
  * @pf: pointer to the physical function struct
  * @pf: pointer to the physical function struct
- * @fsp: RX flow classification rules
+ * @cmd: RX flow classification rules
  *
  * Delete vxlan filter for a specific flow spec.
  * Returns 0 if the filter was successfully deleted.
  **/
  *
  * Delete vxlan filter for a specific flow spec.
  * Returns 0 if the filter was successfully deleted.
  **/
-static int i40e_del_vxlan_filter_ethtool(struct i40e_pf *pf,
-                                        struct ethtool_rx_flow_spec *fsp)
+static int i40e_del_cloud_filter_ethtool(struct i40e_pf *pf,
+                                        struct ethtool_rxnfc *cmd)
 {
 {
-       struct i40e_cloud_filter *rule, *pfilter = NULL;
-       struct i40e_vsi *vsi = NULL;
+       struct i40e_cloud_filter *rule, *filter = NULL;
+       struct ethtool_rx_flow_spec *fsp;
        struct hlist_node *node2;
        struct hlist_node *node2;
-       int ret;
 
 
+       fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->cloud_filter_list, cloud_node) {
        hlist_for_each_entry_safe(rule, node2,
                                  &pf->cloud_filter_list, cloud_node) {
-               /* filter found with the id */
-               if (rule->id == fsp->location) {
-                       pfilter = rule;
+               /* filter found */
+               if (rule->id == fsp->location)
+                       filter = rule;
+
+               /* bail out if we've passed the likely location in the list */
+               if (rule->id >= fsp->location)
                        break;
                        break;
-               }
        }
        }
-       if (!pfilter) {
-               dev_info(&pf->pdev->dev, "no cloud filter exists with id %d\n",
-                       fsp->location);
+       if (!filter)
                return -ENOENT;
                return -ENOENT;
-       }
-       vsi = i40e_find_vsi_from_id(pf, pfilter->vsi_id);
-       if (!vsi) {
-               dev_info(&pf->pdev->dev,
-                        "no vsi with vsi_id %d\n", pfilter->vsi_id);
-               return -ENOSYS;
-       }
 
 
-       ret = i40e_add_del_cloud_filter(pf, pfilter, vsi, false);
-       if (ret) {
-               dev_info(&pf->pdev->dev,
-                        "failed to delete cloud filter, err = %d\n",
-                        ret);
-               return -ENOSYS;
-       }
-
-       /* remove filter from the list */
-       hlist_del(&pfilter->cloud_node);
-       kfree(pfilter);
+       /* remove filter from the list even if failed to remove from device */
+       (void)i40e_add_del_cloud_filter(pf, filter, false);
+       hlist_del(&filter->cloud_node);
+       kfree(filter);
        pf->num_cloud_filters--;
 
        return 0;
 }
 
        pf->num_cloud_filters--;
 
        return 0;
 }
 
-#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
-
 /**
  * i40e_set_rxnfc - command to set RX flow classification rules
  * @netdev: network interface device structure
 /**
  * i40e_set_rxnfc - command to set RX flow classification rules
  * @netdev: network interface device structure
@@ -3078,9 +3977,7 @@ static int i40e_del_vxlan_filter_ethtool(struct i40e_pf *pf,
 static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
 static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
-#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
        struct ethtool_rx_flow_spec *fsp;
        struct ethtool_rx_flow_spec *fsp;
-#endif /* HAVE_VXLAN_RX_OFFLOAD */
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        int ret = -EOPNOTSUPP;
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        int ret = -EOPNOTSUPP;
@@ -3089,32 +3986,29 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
        case ETHTOOL_SRXFH:
                ret = i40e_set_rss_hash_opt(pf, cmd);
                break;
        case ETHTOOL_SRXFH:
                ret = i40e_set_rss_hash_opt(pf, cmd);
                break;
+
        case ETHTOOL_SRXCLSRLINS:
        case ETHTOOL_SRXCLSRLINS:
-#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+               /* if the user-def mask is non-zero, then something was set
+                * in the user-def value to ask us to set up a cloud filter
+                * specifying a target VF rather than flow director filter
+                */
                fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
                fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
-#define I40E_USER_DATA_VXLAN_CLOUD_FILTER 1
-               if (ntohl(fsp->h_ext.data[0]) >=
-                                       I40E_USER_DATA_VXLAN_CLOUD_FILTER)
-                       ret = i40e_add_vxlan_filter_ethtool(pf, fsp);
-               else
+               /* Following check qualifies it to be cloud filter */
+               if ((fsp->m_ext.data[0] == cpu_to_be32(~0)) &&
+                   (fsp->m_ext.data[1] == cpu_to_be32(~0))) {
+                       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+                               ret = i40e_add_cloud_filter_ethtool(pf, cmd);
+               } else {
                        ret = i40e_add_fdir_ethtool(vsi, cmd);
                        ret = i40e_add_fdir_ethtool(vsi, cmd);
+               }
                break;
                break;
-#else
-               ret = i40e_add_fdir_ethtool(vsi, cmd);
-               break;
-#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+
        case ETHTOOL_SRXCLSRLDEL:
        case ETHTOOL_SRXCLSRLDEL:
-#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
-               fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
-               if (ntohl(fsp->h_ext.data[0]) >=
-                                       I40E_USER_DATA_VXLAN_CLOUD_FILTER)
-                       ret = i40e_del_vxlan_filter_ethtool(pf, fsp);
-               else
-                       ret = i40e_del_fdir_entry(vsi, cmd);
-#else
                ret = i40e_del_fdir_entry(vsi, cmd);
                ret = i40e_del_fdir_entry(vsi, cmd);
-#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+               if (ret == -ENOENT)
+                       ret = i40e_del_cloud_filter_ethtool(pf, cmd);
                break;
                break;
+
        default:
                break;
        }
        default:
                break;
        }
@@ -3209,7 +4103,7 @@ static int i40e_set_channels(struct net_device *dev,
 }
 
 #endif /* ETHTOOL_SCHANNELS */
 }
 
 #endif /* ETHTOOL_SCHANNELS */
-#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+
 #if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
 /**
  * i40e_get_rxfh_key_size - get the RSS hash key size
 #if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
 /**
  * i40e_get_rxfh_key_size - get the RSS hash key size
@@ -3264,10 +4158,9 @@ static int i40e_get_rxfh_indir(struct net_device *netdev, u32 *indir)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
-       struct i40e_pf *pf = vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       u32 reg_val;
-       int i, j;
+       u8 *lut, *seed = NULL;
+       int ret;
+       u16 i;
 
 #ifdef HAVE_RXFH_HASHFUNC
        if (hfunc)
 
 #ifdef HAVE_RXFH_HASHFUNC
        if (hfunc)
@@ -3277,26 +4170,22 @@ static int i40e_get_rxfh_indir(struct net_device *netdev, u32 *indir)
        if (!indir)
                return 0;
 
        if (!indir)
                return 0;
 
-       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
-               reg_val = rd32(hw, I40E_PFQF_HLUT(i));
-               indir[j++] = reg_val & 0xff;
-               indir[j++] = (reg_val >> 8) & 0xff;
-               indir[j++] = (reg_val >> 16) & 0xff;
-               indir[j++] = (reg_val >> 24) & 0xff;
-       }
-
 #if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
 #if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
-       if (key) {
-               for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
-                       reg_val = rd32(hw, I40E_PFQF_HKEY(i));
-                       key[j++] = (u8)(reg_val & 0xff);
-                       key[j++] = (u8)((reg_val >> 8) & 0xff);
-                       key[j++] = (u8)((reg_val >> 16) & 0xff);
-                       key[j++] = (u8)((reg_val >> 24) & 0xff);
-               }
-       }
+       seed = key;
 #endif
 #endif
-       return 0;
+       lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
+       if (!lut)
+               return -ENOMEM;
+       ret = i40e_get_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE);
+       if (ret)
+               goto out;
+       for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+               indir[i] = (u32)(lut[i]);
+
+out:
+       kfree(lut);
+
+       return ret;
 }
 #else
 /**
 }
 #else
 /**
@@ -3312,23 +4201,27 @@ static int i40e_get_rxfh_indir(struct net_device *netdev,
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
-       struct i40e_pf *pf = vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       u32 reg_val;
-       int i, j;
+       u8 *lut;
+       int ret;
+       u16 i;
 
        if (indir->size < I40E_HLUT_ARRAY_SIZE)
                return -EINVAL;
 
 
        if (indir->size < I40E_HLUT_ARRAY_SIZE)
                return -EINVAL;
 
-       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
-               reg_val = rd32(hw, I40E_PFQF_HLUT(i));
-               indir->ring_index[j++] = reg_val & 0xff;
-               indir->ring_index[j++] = (reg_val >> 8) & 0xff;
-               indir->ring_index[j++] = (reg_val >> 16) & 0xff;
-               indir->ring_index[j++] = (reg_val >> 24) & 0xff;
-       }
-       indir->size = ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4);
-       return 0;
+       lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
+       if (!lut)
+               return -ENOMEM;
+       ret = i40e_get_rss(vsi, NULL, lut, I40E_HLUT_ARRAY_SIZE);
+       if (ret)
+               goto out;
+       for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+               indir->ring_index[i] = (u32)(lut[i]);
+       indir->size = I40E_HLUT_ARRAY_SIZE;
+
+out:
+       kfree(lut);
+
+       return ret;
 }
 
 #endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
 }
 
 #endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
@@ -3366,10 +4259,8 @@ static int i40e_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
-       struct i40e_pf *pf = vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       u32 reg_val;
-       int i, j;
+       u8 *seed = NULL;
+       u16 i;
 
 #ifdef HAVE_RXFH_HASHFUNC
        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 
 #ifdef HAVE_RXFH_HASHFUNC
        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
@@ -3380,29 +4271,34 @@ static int i40e_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
 
        /* Verify user input. */
        for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) {
 
        /* Verify user input. */
        for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) {
-               if (indir[i] >= pf->rss_size)
+               if (indir[i] >= vsi->rss_size)
                        return -EINVAL;
        }
 
                        return -EINVAL;
        }
 
-       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
-               reg_val = indir[j++];
-               reg_val |= indir[j++] << 8;
-               reg_val |= indir[j++] << 16;
-               reg_val |= indir[j++] << 24;
-               wr32(hw, I40E_PFQF_HLUT(i), reg_val);
-       }
 #if defined(ETHTOOL_SRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
        if (key) {
 #if defined(ETHTOOL_SRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
        if (key) {
-               for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
-                       reg_val = key[j++];
-                       reg_val |= key[j++] << 8;
-                       reg_val |= key[j++] << 16;
-                       reg_val |= key[j++] << 24;
-                       wr32(hw, I40E_PFQF_HKEY(i), reg_val);
+               if (!vsi->rss_hkey_user) {
+                       vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
+                                                    GFP_KERNEL);
+                       if (!vsi->rss_hkey_user)
+                               return -ENOMEM;
                }
                }
+               memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE);
+               seed = vsi->rss_hkey_user;
        }
 #endif
        }
 #endif
-       return 0;
+       if (!vsi->rss_lut_user) {
+               vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
+               if (!vsi->rss_lut_user)
+                       return -ENOMEM;
+       }
+
+       /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+       for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+               vsi->rss_lut_user[i] = (u8)(indir[i]);
+
+       return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
+                              I40E_HLUT_ARRAY_SIZE);
 }
 #else
 /**
 }
 #else
 /**
@@ -3418,29 +4314,29 @@ static int i40e_set_rxfh_indir(struct net_device *netdev,
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
-       struct i40e_pf *pf = vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       u32 reg_val;
-       int i, j;
+       u16 i;
 
        if (indir->size < I40E_HLUT_ARRAY_SIZE)
                return -EINVAL;
 
        /* Verify user input. */
        for (i = 0; i < (I40E_PFQF_HLUT_MAX_INDEX + 1) * 4; i++) {
 
        if (indir->size < I40E_HLUT_ARRAY_SIZE)
                return -EINVAL;
 
        /* Verify user input. */
        for (i = 0; i < (I40E_PFQF_HLUT_MAX_INDEX + 1) * 4; i++) {
-               if (indir->ring_index[i] >= pf->rss_size)
+               if (indir->ring_index[i] >= vsi->rss_size)
                        return -EINVAL;
        }
 
                        return -EINVAL;
        }
 
-       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
-               reg_val = indir->ring_index[j++];
-               reg_val |= indir->ring_index[j++] << 8;
-               reg_val |= indir->ring_index[j++] << 16;
-               reg_val |= indir->ring_index[j++] << 24;
-               wr32(hw, I40E_PFQF_HLUT(i), reg_val);
+       if (!vsi->rss_lut_user) {
+               vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
+               if (!vsi->rss_lut_user)
+                       return -ENOMEM;
        }
 
        }
 
-       return 0;
+       /* Each 32 bits pointed by 'ring_index' is stored with a lut entry */
+       for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+               vsi->rss_lut_user[i] = (u8)(indir->ring_index[i]);
+
+       return i40e_config_rss(vsi, NULL, vsi->rss_lut_user,
+                              I40E_HLUT_ARRAY_SIZE);
 }
 #endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
 #endif /* ETHTOOL_SRXFHINDIR */
 }
 #endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
 #endif /* ETHTOOL_SRXFHINDIR */
@@ -3471,6 +4367,12 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
                I40E_PRIV_FLAGS_FD_ATR : 0;
        ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
                I40E_PRIV_FLAGS_VEB_STATS : 0;
                I40E_PRIV_FLAGS_FD_ATR : 0;
        ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
                I40E_PRIV_FLAGS_VEB_STATS : 0;
+       ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
+               0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
+       if (pf->hw.pf_id == 0) {
+               ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
+                       I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
+       }
 
        return ret_flags;
 }
 
        return ret_flags;
 }
@@ -3485,6 +4387,12 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
+       u16 sw_flags = 0, valid_flags = 0;
+       bool reset_required = false;
+       bool promisc_change = false;
+       int ret;
+
+       /* NOTE: MFP is not settable */
 
        if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
                pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
 
        if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
                pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
@@ -3502,10 +4410,52 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
                pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
        }
 
                pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
        }
 
-       if (flags & I40E_PRIV_FLAGS_VEB_STATS)
+       if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
+           !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
                pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
                pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
-       else
+               reset_required = true;
+       } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) &&
+                  (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
                pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
                pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+               reset_required = true;
+       }
+
+       if (pf->hw.pf_id == 0) {
+               if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+                   !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+                       pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
+                       promisc_change = true;
+               } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+                          (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+                       pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
+                       promisc_change = true;
+               }
+       }
+       if (promisc_change) {
+               if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+                       sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+               valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+               ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
+                                               NULL);
+               if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+                       dev_info(&pf->pdev->dev,
+                                "couldn't set switch config bits, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+                       /* not a fatal problem, just keep going */
+               }
+       }
+
+       if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
+                                  (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
+               pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+       else
+               pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+
+       /* if needed, issue reset to cause things to take effect */
+       if (reset_required)
+               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
        return 0;
 }
 
        return 0;
 }
@@ -3537,10 +4487,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
        .set_tx_csum            = i40e_set_tx_csum,
        .get_sg                 = ethtool_op_get_sg,
        .set_sg                 = ethtool_op_set_sg,
        .set_tx_csum            = i40e_set_tx_csum,
        .get_sg                 = ethtool_op_get_sg,
        .set_sg                 = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
        .get_tso                = ethtool_op_get_tso,
        .set_tso                = i40e_set_tso,
        .get_tso                = ethtool_op_get_tso,
        .set_tso                = i40e_set_tso,
-#endif
 #ifdef ETHTOOL_GFLAGS
        .get_flags              = ethtool_op_get_flags,
        .set_flags              = i40e_set_flags,
 #ifdef ETHTOOL_GFLAGS
        .get_flags              = ethtool_op_get_flags,
        .set_flags              = i40e_set_flags,
similarity index 97%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_fcoe.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_fcoe.c
index 61dcba6271b87b54f7154fb21e811b8f9de09d6e..d6b8f9b49f581171e2503c538d7afdab7b8164aa 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -24,6 +21,8 @@
  *
  ******************************************************************************/
 
  *
  ******************************************************************************/
 
+#ifdef WITH_FCOE
+
 #include <linux/if_ether.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <linux/if_ether.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -45,16 +44,6 @@ static inline bool i40e_rx_is_fip(u16 ptype)
        return ptype == I40E_RX_PTYPE_L2_FIP_PAY2;
 }
 
        return ptype == I40E_RX_PTYPE_L2_FIP_PAY2;
 }
 
-/**
- * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
- * @ptype: the packet type field from rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
-       return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
-              (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
 /**
  * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
  * @sof: the FCoE start of frame delimiter
 /**
  * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
  * @sof: the FCoE start of frame delimiter
@@ -303,11 +292,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf)
        }
 
        /* enable FCoE hash filter */
        }
 
        /* enable FCoE hash filter */
-       val = rd32(hw, I40E_PFQF_HENA(1));
+       val = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1));
        val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
        val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
        val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
        val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
        val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
        val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
-       wr32(hw, I40E_PFQF_HENA(1), val);
+       i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), val);
 
        /* enable flag */
        pf->flags |= I40E_FLAG_FCOE_ENABLED;
 
        /* enable flag */
        pf->flags |= I40E_FLAG_FCOE_ENABLED;
@@ -325,11 +314,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf)
        pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K;
 
        /* Setup max frame with FCoE_MTU plus L2 overheads */
        pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K;
 
        /* Setup max frame with FCoE_MTU plus L2 overheads */
-       val = rd32(hw, I40E_GLFCOE_RCTL);
+       val = i40e_read_rx_ctl(hw, I40E_GLFCOE_RCTL);
        val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK;
        val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
                 << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT);
        val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK;
        val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
                 << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT);
-       wr32(hw, I40E_GLFCOE_RCTL, val);
+       i40e_write_rx_ctl(hw, I40E_GLFCOE_RCTL, val);
 
        dev_info(&pf->pdev->dev, "FCoE is supported.\n");
 }
 
        dev_info(&pf->pdev->dev, "FCoE is supported.\n");
 }
@@ -1376,16 +1365,32 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
        struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
        struct i40e_tx_buffer *first;
        u32 tx_flags = 0;
        struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
        struct i40e_tx_buffer *first;
        u32 tx_flags = 0;
+       int fso, count;
        u8 hdr_len = 0;
        u8 sof = 0;
        u8 eof = 0;
        u8 hdr_len = 0;
        u8 sof = 0;
        u8 eof = 0;
-       int fso;
 
        if (i40e_fcoe_set_skb_header(skb))
                goto out_drop;
 
 
        if (i40e_fcoe_set_skb_header(skb))
                goto out_drop;
 
-       if (!i40e_xmit_descriptor_count(skb, tx_ring))
+       count = i40e_xmit_descriptor_count(skb);
+       if (i40e_chk_linearize(skb, count)) {
+               if (__skb_linearize(skb))
+                       goto out_drop;
+               count = i40e_txd_use_count(skb->len);
+               tx_ring->tx_stats.tx_linearize++;
+       }
+
+       /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+        *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+        *       + 4 desc gap to avoid the cache line where head is,
+        *       + 1 desc for context descriptor,
+        * otherwise try next time
+        */
+       if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+               tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
                return NETDEV_TX_BUSY;
+       }
 
        /* prepare the xmit flags */
        if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
 
        /* prepare the xmit flags */
        if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
@@ -1494,7 +1499,11 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = {
        .ndo_vlan_rx_add_vid    = i40e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = i40e_vlan_rx_kill_vid,
 #ifdef HAVE_SETUP_TC
        .ndo_vlan_rx_add_vid    = i40e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = i40e_vlan_rx_kill_vid,
 #ifdef HAVE_SETUP_TC
+#ifdef NETIF_F_HW_TC
+       .ndo_setup_tc           = __i40e_setup_tc,
+#else
        .ndo_setup_tc           = i40e_setup_tc,
        .ndo_setup_tc           = i40e_setup_tc,
+#endif
 #endif /* HAVE_SETUP_TC */
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 #endif /* HAVE_SETUP_TC */
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1625,8 +1634,6 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
        if (!(pf->flags & I40E_FLAG_FCOE_ENABLED))
                return;
 
        if (!(pf->flags & I40E_FLAG_FCOE_ENABLED))
                return;
 
-       BUG_ON(!pf->vsi[pf->lan_vsi]);
-
        for (i = 0; i < pf->num_alloc_vsi; i++) {
                vsi = pf->vsi[i];
                if (vsi && vsi->type == I40E_VSI_FCOE) {
        for (i = 0; i < pf->num_alloc_vsi; i++) {
                vsi = pf->vsi[i];
                if (vsi && vsi->type == I40E_VSI_FCOE) {
@@ -1646,3 +1653,4 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
                dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
        }
 }
                dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
        }
 }
+#endif /* WITH_FCOE */
similarity index 94%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_fcoe.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_fcoe.h
index 88a2bac1b1ff581fe00f678352a5be93553cc82a..91ae783eef1d05cdcad8debfce96ac7446c72288 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
similarity index 88%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_helper.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_helper.h
index 003291aa7dd424eefb54df6577c56354d54a9182..dfae85cd02dc52a89a64d4b05602b2015e7f8e18 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -27,6 +24,8 @@
 #ifndef _I40E_HELPER_H_
 #define _I40E_HELPER_H_
 
 #ifndef _I40E_HELPER_H_
 #define _I40E_HELPER_H_
 
+#include "i40e_alloc.h"
+
 /**
  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw:   pointer to the HW structure
 /**
  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw:   pointer to the HW structure
@@ -36,6 +35,7 @@
  **/
 inline int i40e_allocate_dma_mem_d(struct i40e_hw *hw,
                                   struct i40e_dma_mem *mem,
  **/
 inline int i40e_allocate_dma_mem_d(struct i40e_hw *hw,
                                   struct i40e_dma_mem *mem,
+                                  __always_unused enum i40e_memory_type mtype,
                                   u64 size, u32 alignment)
 {
        struct i40e_pf *nf = (struct i40e_pf *)hw->back;
                                   u64 size, u32 alignment)
 {
        struct i40e_pf *nf = (struct i40e_pf *)hw->back;
@@ -100,11 +100,16 @@ inline int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
        return 0;
 }
 
        return 0;
 }
 
+/* prototype */
+inline void i40e_destroy_spinlock_d(struct i40e_spinlock *sp);
+inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp);
+inline void i40e_release_spinlock_d(struct i40e_spinlock *sp);
+
 /**
  * i40e_init_spinlock_d - OS specific spinlock init for shared code
  * @sp: pointer to a spinlock declared in driver space
  **/
 /**
  * i40e_init_spinlock_d - OS specific spinlock init for shared code
  * @sp: pointer to a spinlock declared in driver space
  **/
-inline void i40e_init_spinlock_d(struct i40e_spinlock *sp)
+static inline void i40e_init_spinlock_d(struct i40e_spinlock *sp)
 {
        mutex_init((struct mutex *)sp);
 }
 {
        mutex_init((struct mutex *)sp);
 }
similarity index 97%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_hmc.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_hmc.c
index 948949c55b17a35cc09d575cb8b5c5b88352affd..d87dbd371926c176a809f832de247fb3bfb78d95 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
similarity index 97%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_hmc.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_hmc.h
index ccfc19ce400ceaf3df3929a83451e1db06f886b2..c0708a72177c187aa7afbe86010ae7be59006785 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
similarity index 99%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_lan_hmc.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_lan_hmc.c
index 96ca9e53a9a8dc22d3a91fbf66ba94c0fa26d231..2f6d5e0af76628d52a3cef0707f5747467de490b 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -762,7 +759,7 @@ static void i40e_write_byte(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = BIT(ce_info->width) - 1;
+       mask = (u8)(BIT(ce_info->width) - 1);
 
        src_byte = *from;
        src_byte &= mask;
 
        src_byte = *from;
        src_byte &= mask;
@@ -947,7 +944,7 @@ static void i40e_read_byte(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = BIT(ce_info->width) - 1;
+       mask = (u8)(BIT(ce_info->width) - 1);
 
        /* shift to correct alignment */
        mask <<= shift_width;
 
        /* shift to correct alignment */
        mask <<= shift_width;
similarity index 95%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_lan_hmc.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_lan_hmc.h
index efbf5860102e248b87b7a40063a6102082ea474a..efdb172f7618e592b7dfe822fe6cac1641fdb55d 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
similarity index 89%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_main.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_main.c
index fc9b3dbb6e66f28bb58312454c67ae97533d97f4..d0051cf54a2acd8bfc30586f6258c0fca2ea7593 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
 #include "i40e_helper.h"
 #include "i40e_diag.h"
 #ifdef HAVE_VXLAN_RX_OFFLOAD
 #include "i40e_helper.h"
 #include "i40e_diag.h"
 #ifdef HAVE_VXLAN_RX_OFFLOAD
-#ifdef HAVE_VXLAN_CHECKS
-#include <net/vxlan.h>
-#else
 #if IS_ENABLED(CONFIG_VXLAN)
 #include <net/vxlan.h>
 #endif
 #if IS_ENABLED(CONFIG_VXLAN)
 #include <net/vxlan.h>
 #endif
-#endif /* HAVE_VXLAN_CHECKS */
 #endif /* HAVE_VXLAN_RX_OFFLOAD */
 #endif /* HAVE_VXLAN_RX_OFFLOAD */
+#ifdef HAVE_GRE_ENCAP_OFFLOAD
+#include <net/gre.h>
+#endif /* HAVE_GRE_ENCAP_OFFLOAD */
+#ifdef HAVE_GENEVE_RX_OFFLOAD
+#if IS_ENABLED(CONFIG_GENEVE)
+#include <net/geneve.h>
+#endif
+#endif /* HAVE_GENEVE_RX_OFFLOAD */
 
 char i40e_driver_name[] = "i40e";
 static const char i40e_driver_string[] =
 
 char i40e_driver_name[] = "i40e";
 static const char i40e_driver_string[] =
-                       "Intel(R) Ethernet Connection XL710 Network Driver";
-
-#define DRV_HW_PERF
-#define DRV_FPGA
-#define DRV_X722
-#define DRV_A0
-#ifdef I40E_MSI_INTERRUPT
-#define DRV_KERN "-msi"
-#else
-#ifdef I40E_LEGACY_INTERRUPT
-#define DRV_KERN "-legacy"
-#else
-#define DRV_KERN
-#endif
-#endif
+               "Intel(R) 40-10 Gigabit Ethernet Connection Network Driver";
+
+#ifndef DRV_VERSION_LOCAL
+#define DRV_VERSION_LOCAL
+#endif /* DRV_VERSION_LOCAL */
+
+#define DRV_VERSION_DESC ""
 
 #define DRV_VERSION_MAJOR 1
 
 #define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 47
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 18
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
-            __stringify(DRV_VERSION_MINOR) "." \
-            __stringify(DRV_VERSION_BUILD) DRV_HW_PERF DRV_FPGA DRV_X722 DRV_A0 DRV_KERN
+       __stringify(DRV_VERSION_MINOR) "." \
+       __stringify(DRV_VERSION_BUILD) \
+       DRV_VERSION_DESC __stringify(DRV_VERSION_LOCAL)
 const char i40e_driver_version_str[] = DRV_VERSION;
 const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright (c) 2013 - 2015 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright(c) 2013 - 2016 Intel Corporation.";
 
 /* a bit of forward declarations */
 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
 
 /* a bit of forward declarations */
 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
@@ -74,6 +68,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
 static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
 static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+                             u16 rss_table_size, u16 rss_size);
+static void i40e_clear_rss_config_user(struct i40e_vsi *vsi);
 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
 /* i40e_pci_tbl - PCI Device ID Table
 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
 /* i40e_pci_tbl - PCI Device ID Table
@@ -86,7 +83,6 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
 static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
 static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
-       {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
@@ -96,6 +92,15 @@ static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
+#ifdef X722_DEV_SUPPORT
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},
+#endif /* X722_DEV_SUPPORT */
        /* required last entry */
        {0, }
 };
        /* required last entry */
        {0, }
 };
@@ -121,7 +126,7 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
-MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_DESCRIPTION("Intel(R) 40-10 Gigabit Ethernet Connection Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
@@ -348,24 +353,6 @@ static void i40e_tx_timeout(struct net_device *netdev)
        pf->tx_timeout_recovery_level++;
 }
 
        pf->tx_timeout_recovery_level++;
 }
 
-/**
- * i40e_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
- **/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
-{
-       rx_ring->next_to_use = val;
-
-       /* Force memory writes to complete before letting h/w
-        * know there are new descriptors to fetch.  (Only
-        * applicable for weak-ordered memory model archs,
-        * such as IA-64).
-        */
-       wmb();
-       writel(val, rx_ring->tail);
-}
-
 /**
  * i40e_get_vsi_stats_struct - Get System Network Statistics
  * @vsi: the VSI we care about
 /**
  * i40e_get_vsi_stats_struct - Get System Network Statistics
  * @vsi: the VSI we care about
@@ -765,7 +752,7 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
        if (vsi->type != I40E_VSI_FCOE)
                return;
 
        if (vsi->type != I40E_VSI_FCOE)
                return;
 
-       idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
+       idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
        fs = &vsi->fcoe_stats;
        ofs = &vsi->fcoe_stats_offsets;
 
        fs = &vsi->fcoe_stats;
        ofs = &vsi->fcoe_stats_offsets;
 
@@ -798,106 +785,6 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
 }
 
 #endif
 }
 
 #endif
-/**
- * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
- * @pf: the corresponding PF
- *
- * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
- **/
-static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
-{
-       struct i40e_hw_port_stats *osd = &pf->stats_offsets;
-       struct i40e_hw_port_stats *nsd = &pf->stats;
-       struct i40e_hw *hw = &pf->hw;
-       u64 xoff = 0;
-       u16 i, v;
-
-       if ((hw->fc.current_mode != I40E_FC_FULL) &&
-           (hw->fc.current_mode != I40E_FC_RX_PAUSE))
-               return;
-
-       xoff = nsd->link_xoff_rx;
-       i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
-                          pf->stat_offsets_loaded,
-                          &osd->link_xoff_rx, &nsd->link_xoff_rx);
-
-       /* No new LFC xoff rx */
-       if (!(nsd->link_xoff_rx - xoff))
-               return;
-
-       /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
-       for (v = 0; v < pf->num_alloc_vsi; v++) {
-               struct i40e_vsi *vsi = pf->vsi[v];
-
-               if (!vsi || !vsi->tx_rings[0])
-                       continue;
-
-               for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       struct i40e_ring *ring = vsi->tx_rings[i];
-
-                       clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
-               }
-       }
-}
-
-/**
- * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
- * @pf: the corresponding PF
- *
- * Update the Rx XOFF counter (PAUSE frames) in PFC mode
- **/
-static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
-{
-       struct i40e_hw_port_stats *osd = &pf->stats_offsets;
-       struct i40e_hw_port_stats *nsd = &pf->stats;
-       bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
-       struct i40e_dcbx_config *dcb_cfg;
-       struct i40e_hw *hw = &pf->hw;
-       u16 i, v;
-       u8 tc;
-
-       dcb_cfg = &hw->local_dcbx_config;
-
-       /* Collect Link XOFF stats when PFC is disabled */
-       if (!dcb_cfg->pfc.pfcenable) {
-               i40e_update_link_xoff_rx(pf);
-               return;
-       }
-
-       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-               u64 prio_xoff = nsd->priority_xoff_rx[i];
-
-               i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
-                                  pf->stat_offsets_loaded,
-                                  &osd->priority_xoff_rx[i],
-                                  &nsd->priority_xoff_rx[i]);
-
-               /* No new PFC xoff rx */
-               if (!(nsd->priority_xoff_rx[i] - prio_xoff))
-                       continue;
-               /* Get the TC for given priority */
-               tc = dcb_cfg->etscfg.prioritytable[i];
-               xoff[tc] = true;
-       }
-
-       /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
-       for (v = 0; v < pf->num_alloc_vsi; v++) {
-               struct i40e_vsi *vsi = pf->vsi[v];
-
-               if (!vsi || !vsi->tx_rings[0])
-                       continue;
-
-               for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       struct i40e_ring *ring = vsi->tx_rings[i];
-
-                       tc = ring->dcb_tc;
-                       if (xoff[tc])
-                               clear_bit(__I40E_HANG_CHECK_ARMED,
-                                         &ring->state);
-               }
-       }
-}
-
 /**
  * i40e_update_vsi_stats - Update the vsi statistics counters.
  * @vsi: the VSI to be updated
 /**
  * i40e_update_vsi_stats - Update the vsi statistics counters.
  * @vsi: the VSI to be updated
@@ -921,6 +808,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        u32 tx_restart, tx_busy;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        u32 tx_restart, tx_busy;
+       u64 tx_lost_interrupt;
        struct i40e_ring *p;
        u32 rx_page, rx_buf;
        u64 bytes, packets;
        struct i40e_ring *p;
        u32 rx_page, rx_buf;
        u64 bytes, packets;
@@ -928,6 +816,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        unsigned int start;
 #endif
        u64 tx_linearize;
        unsigned int start;
 #endif
        u64 tx_linearize;
+       u64 tx_force_wb;
        u64 rx_p, rx_b;
        u64 tx_p, tx_b;
        u16 q;
        u64 rx_p, rx_b;
        u64 tx_p, tx_b;
        u16 q;
@@ -946,7 +835,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
         */
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
         */
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
-       tx_restart = tx_busy = tx_linearize = 0;
+       tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+       tx_lost_interrupt = 0;
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
@@ -968,6 +858,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;
                tx_linearize += p->tx_stats.tx_linearize;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;
                tx_linearize += p->tx_stats.tx_linearize;
+               tx_force_wb += p->tx_stats.tx_force_wb;
+               tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
 
                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
 
                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
@@ -989,6 +881,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
        vsi->tx_linearize = tx_linearize;
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
        vsi->tx_linearize = tx_linearize;
+       vsi->tx_force_wb = tx_force_wb;
+       vsi->tx_lost_interrupt = tx_lost_interrupt;
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;
 
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;
 
@@ -1104,12 +998,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
        i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_tx, &nsd->link_xon_tx);
        i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_tx, &nsd->link_xon_tx);
-       i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
+       i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->link_xoff_rx, &nsd->link_xoff_rx);
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_tx, &nsd->link_xoff_tx);
 
        for (i = 0; i < 8; i++) {
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_tx, &nsd->link_xoff_tx);
 
        for (i = 0; i < 8; i++) {
+               i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+                                  pf->stat_offsets_loaded,
+                                  &osd->priority_xoff_rx[i],
+                                  &nsd->priority_xoff_rx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_rx[i],
                i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_rx[i],
@@ -1280,9 +1180,6 @@ struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
 {
        struct i40e_mac_filter *f;
 
 {
        struct i40e_mac_filter *f;
 
-       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
-               "Missing mac_filter_list_lock\n");
-
        if (!vsi || !macaddr)
                return NULL;
 
        if (!vsi || !macaddr)
                return NULL;
 
@@ -1311,9 +1208,6 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
 {
        struct i40e_mac_filter *f;
 
 {
        struct i40e_mac_filter *f;
 
-       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
-               "Missing mac_filter_list_lock\n");
-
        if (!vsi || !macaddr)
                return NULL;
 
        if (!vsi || !macaddr)
                return NULL;
 
@@ -1336,9 +1230,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
 {
        struct i40e_mac_filter *f;
 
 {
        struct i40e_mac_filter *f;
 
-       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
-               "Missing mac_filter_list_lock\n");
-
        /* Only -1 for all the filters denotes not in vlan mode
         * so we have to go through all the list in order to make sure
         */
        /* Only -1 for all the filters denotes not in vlan mode
         * so we have to go through all the list in order to make sure
         */
@@ -1367,9 +1258,6 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
 {
        struct i40e_mac_filter *f;
 
 {
        struct i40e_mac_filter *f;
 
-       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
-               "Missing mac_filter_list_lock\n");
-
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                if (vsi->info.pvid)
                        f->vlan = le16_to_cpu(vsi->info.pvid);
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                if (vsi->info.pvid)
                        f->vlan = le16_to_cpu(vsi->info.pvid);
@@ -1385,6 +1273,40 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
                                        struct i40e_mac_filter, list);
 }
 
                                        struct i40e_mac_filter, list);
 }
 
+/**
+ * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ * @is_vf: true if it is a VF
+ * @is_netdev: true if it is a netdev
+ *
+ * Removes a given MAC address from a VSI, regardless of VLAN
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+                         bool is_vf, bool is_netdev)
+{
+       struct i40e_mac_filter *f = NULL;
+       int changed = 0;
+
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               if ((ether_addr_equal(macaddr, f->macaddr)) &&
+                   (is_vf == f->is_vf) &&
+                   (is_netdev == f->is_netdev)) {
+                       f->counter--;
+                       f->changed = true;
+                       changed = 1;
+               }
+       }
+       if (changed) {
+               vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+               vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+               return 0;
+       }
+       return -ENOENT;
+}
+
 /**
  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
  * @vsi: the PF Main VSI - inappropriate for any other VSI
 /**
  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
  * @vsi: the PF Main VSI - inappropriate for any other VSI
@@ -1426,8 +1348,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
  * Returns ptr to the filter object or NULL when no memory available.
  *
  * NOTE: This function is expected to be called with mac_filter_list_lock
  * Returns ptr to the filter object or NULL when no memory available.
  *
  * NOTE: This function is expected to be called with mac_filter_list_lock
- * being held. If needed could add WARN/BUG_ON if lock is not held for debug
- * purpose.
+ * being held.
  **/
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        u8 *macaddr, s16 vlan,
  **/
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        u8 *macaddr, s16 vlan,
@@ -1435,9 +1356,6 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
 {
        struct i40e_mac_filter *f;
 
 {
        struct i40e_mac_filter *f;
 
-       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
-               "Missing mac_filter_list_lock\n");
-
        if (!vsi || !macaddr)
                return NULL;
 
        if (!vsi || !macaddr)
                return NULL;
 
@@ -1452,7 +1370,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                f->changed = true;
 
                INIT_LIST_HEAD(&f->list);
                f->changed = true;
 
                INIT_LIST_HEAD(&f->list);
-               list_add(&f->list, &vsi->mac_filter_list);
+               list_add_tail(&f->list, &vsi->mac_filter_list);
        }
 
        /* increment counter and add a new flag if needed */
        }
 
        /* increment counter and add a new flag if needed */
@@ -1491,8 +1409,7 @@ add_filter_out:
  * @is_netdev: make sure it's a netdev filter, else doesn't matter
  *
  * NOTE: This function is expected to be called with mac_filter_list_lock
  * @is_netdev: make sure it's a netdev filter, else doesn't matter
  *
  * NOTE: This function is expected to be called with mac_filter_list_lock
- * being held. If needed could add WARN/BUG_ON if lock is not held for debug
- * purpose.
+ * being held.
  **/
 void i40e_del_filter(struct i40e_vsi *vsi,
                     u8 *macaddr, s16 vlan,
  **/
 void i40e_del_filter(struct i40e_vsi *vsi,
                     u8 *macaddr, s16 vlan,
@@ -1500,9 +1417,6 @@ void i40e_del_filter(struct i40e_vsi *vsi,
 {
        struct i40e_mac_filter *f;
 
 {
        struct i40e_mac_filter *f;
 
-       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
-               "Missing mac_filter_list_lock\n");
-
        if (!vsi || !macaddr)
                return;
 
        if (!vsi || !macaddr)
                return;
 
@@ -1626,7 +1540,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
 
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
-       return i40e_sync_vsi_filters(vsi, false);
+       /* schedule our worker thread which will take care of
+        * applying the new filter changes
+        */
+       i40e_service_event_schedule(vsi->back);
+       return 0;
 }
 
 /**
 }
 
 /**
@@ -1666,7 +1584,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                       if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
+                       if (enabled_tc & BIT(i)) /* TC is enabled */
                                numtc++;
                }
                if (!numtc) {
                                numtc++;
                }
                if (!numtc) {
@@ -1696,12 +1614,13 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
-               if (vsi->tc_config.enabled_tc & BIT_ULL(i)) { /* TC is enabled */
+               if (vsi->tc_config.enabled_tc & BIT(i)) { /* TC is enabled */
                        int pow, num_qps;
 
                        switch (vsi->type) {
                        case I40E_VSI_MAIN:
                        int pow, num_qps;
 
                        switch (vsi->type) {
                        case I40E_VSI_MAIN:
-                               qcount = min_t(int, pf->rss_size, num_tc_qps);
+                               qcount = min_t(int, pf->alloc_rss_size,
+                                              num_tc_qps);
                                break;
 #ifdef I40E_FCOE
                        case I40E_VSI_FCOE:
                                break;
 #ifdef I40E_FCOE
                        case I40E_VSI_FCOE:
@@ -1872,6 +1791,11 @@ bottom_of_search_loop:
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
        }
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
        }
+
+       /* schedule our worker thread which will take care of
+        * applying the new filter changes
+        */
+       i40e_service_event_schedule(vsi->back);
 }
 
 /**
 }
 
 /**
@@ -1950,13 +1874,12 @@ static void i40e_cleanup_add_list(struct list_head *add_list)
 /**
  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
 /**
  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
- * @grab_rtnl: whether RTNL needs to be grabbed
  *
  * Push any outstanding VSI filter changes through the AdminQ.
  *
  * Returns 0 or error value
  **/
  *
  * Push any outstanding VSI filter changes through the AdminQ.
  *
  * Returns 0 or error value
  **/
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 {
        struct list_head tmp_del_list, tmp_add_list;
        struct i40e_mac_filter *f, *ftmp, *fclone;
 {
        struct list_head tmp_del_list, tmp_add_list;
        struct i40e_mac_filter *f, *ftmp, *fclone;
@@ -1964,8 +1887,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
        bool add_happened = false;
        int filter_list_len = 0;
        u32 changed_flags = 0;
        bool add_happened = false;
        int filter_list_len = 0;
        u32 changed_flags = 0;
+       i40e_status aq_ret = 0;
        bool err_cond = false;
        bool err_cond = false;
-       i40e_status ret = 0;
+       int retval = 0;
        struct i40e_pf *pf;
        int num_add = 0;
        int num_del = 0;
        struct i40e_pf *pf;
        int num_add = 0;
        int num_del = 0;
@@ -2028,18 +1952,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                }
                spin_unlock_bh(&vsi->mac_filter_list_lock);
 
                }
                spin_unlock_bh(&vsi->mac_filter_list_lock);
 
-               if (err_cond)
+               if (err_cond) {
                        i40e_cleanup_add_list(&tmp_add_list);
                        i40e_cleanup_add_list(&tmp_add_list);
+                       retval = -ENOMEM;
+                       goto out;
+               }
        }
 
        /* Now process 'del_list' outside the lock */
        if (!list_empty(&tmp_del_list)) {
        }
 
        /* Now process 'del_list' outside the lock */
        if (!list_empty(&tmp_del_list)) {
+               int del_list_size;
 
                filter_list_len = pf->hw.aq.asq_buf_size /
                            sizeof(struct i40e_aqc_remove_macvlan_element_data);
 
                filter_list_len = pf->hw.aq.asq_buf_size /
                            sizeof(struct i40e_aqc_remove_macvlan_element_data);
-               del_list = kcalloc(filter_list_len,
-                           sizeof(struct i40e_aqc_remove_macvlan_element_data),
-                           GFP_KERNEL);
+               del_list_size = filter_list_len *
+                           sizeof(struct i40e_aqc_remove_macvlan_element_data);
+               del_list = kzalloc(del_list_size, GFP_ATOMIC);
                if (!del_list) {
                        i40e_cleanup_add_list(&tmp_add_list);
 
                if (!del_list) {
                        i40e_cleanup_add_list(&tmp_add_list);
 
@@ -2048,8 +1976,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                        i40e_undo_del_filter_entries(vsi, &tmp_del_list);
                        i40e_undo_add_filter_entries(vsi);
                        spin_unlock_bh(&vsi->mac_filter_list_lock);
                        i40e_undo_del_filter_entries(vsi, &tmp_del_list);
                        i40e_undo_add_filter_entries(vsi);
                        spin_unlock_bh(&vsi->mac_filter_list_lock);
-                       vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
-                       return -ENOMEM;
+                       retval = -ENOMEM;
+                       goto out;
                }
 
                list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
                }
 
                list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
@@ -2067,18 +1995,20 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 
                        /* flush a full buffer */
                        if (num_del == filter_list_len) {
 
                        /* flush a full buffer */
                        if (num_del == filter_list_len) {
-                               ret = i40e_aq_remove_macvlan(&pf->hw,
-                                           vsi->seid, del_list, num_del,
-                                           NULL);
+                               aq_ret = i40e_aq_remove_macvlan(&pf->hw,
+                                               vsi->seid, del_list, num_del,
+                                               NULL);
                                aq_err = pf->hw.aq.asq_last_status;
                                num_del = 0;
                                aq_err = pf->hw.aq.asq_last_status;
                                num_del = 0;
-                               memset(del_list, 0, sizeof(*del_list));
+                               memset(del_list, 0, del_list_size);
 
 
-                               if (ret && aq_err != I40E_AQ_RC_ENOENT)
+                               if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
+                                       retval = -EIO;
                                        dev_err(&pf->pdev->dev,
                                                 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
                                        dev_err(&pf->pdev->dev,
                                                 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
-                                                i40e_stat_str(&pf->hw, ret),
+                                                i40e_stat_str(&pf->hw, aq_ret),
                                                 i40e_aq_str(&pf->hw, aq_err));
                                                 i40e_aq_str(&pf->hw, aq_err));
+                               }
                        }
                        /* Release memory for MAC filter entries which were
                         * synced up with HW.
                        }
                        /* Release memory for MAC filter entries which were
                         * synced up with HW.
@@ -2088,15 +2018,15 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                }
 
                if (num_del) {
                }
 
                if (num_del) {
-                       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+                       aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
                                                     del_list, num_del, NULL);
                        aq_err = pf->hw.aq.asq_last_status;
                        num_del = 0;
 
                                                     del_list, num_del, NULL);
                        aq_err = pf->hw.aq.asq_last_status;
                        num_del = 0;
 
-                       if (ret && aq_err != I40E_AQ_RC_ENOENT)
+                       if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
                                dev_info(&pf->pdev->dev,
                                         "ignoring delete macvlan error, err %s aq_err %s\n",
                                dev_info(&pf->pdev->dev,
                                         "ignoring delete macvlan error, err %s aq_err %s\n",
-                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_stat_str(&pf->hw, aq_ret),
                                         i40e_aq_str(&pf->hw, aq_err));
                }
 
                                         i40e_aq_str(&pf->hw, aq_err));
                }
 
@@ -2105,13 +2035,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
        }
 
        if (!list_empty(&tmp_add_list)) {
        }
 
        if (!list_empty(&tmp_add_list)) {
+               int add_list_size;
 
                /* do all the adds now */
                filter_list_len = pf->hw.aq.asq_buf_size /
                               sizeof(struct i40e_aqc_add_macvlan_element_data),
 
                /* do all the adds now */
                filter_list_len = pf->hw.aq.asq_buf_size /
                               sizeof(struct i40e_aqc_add_macvlan_element_data),
-               add_list = kcalloc(filter_list_len,
-                              sizeof(struct i40e_aqc_add_macvlan_element_data),
-                              GFP_KERNEL);
+               add_list_size = filter_list_len *
+                              sizeof(struct i40e_aqc_add_macvlan_element_data);
+               add_list = kzalloc(add_list_size, GFP_ATOMIC);
                if (!add_list) {
                        /* Purge element from temporary lists */
                        i40e_cleanup_add_list(&tmp_add_list);
                if (!add_list) {
                        /* Purge element from temporary lists */
                        i40e_cleanup_add_list(&tmp_add_list);
@@ -2120,8 +2051,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                        spin_lock_bh(&vsi->mac_filter_list_lock);
                        i40e_undo_add_filter_entries(vsi);
                        spin_unlock_bh(&vsi->mac_filter_list_lock);
                        spin_lock_bh(&vsi->mac_filter_list_lock);
                        i40e_undo_add_filter_entries(vsi);
                        spin_unlock_bh(&vsi->mac_filter_list_lock);
-                       vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
-                       return -ENOMEM;
+                       retval = -ENOMEM;
+                       goto out;
                }
 
                list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
                }
 
                list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
@@ -2142,15 +2073,15 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 
                        /* flush a full buffer */
                        if (num_add == filter_list_len) {
 
                        /* flush a full buffer */
                        if (num_add == filter_list_len) {
-                               ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-                                                         add_list, num_add,
-                                                         NULL);
+                               aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                            add_list, num_add,
+                                                            NULL);
                                aq_err = pf->hw.aq.asq_last_status;
                                num_add = 0;
 
                                aq_err = pf->hw.aq.asq_last_status;
                                num_add = 0;
 
-                               if (ret)
+                               if (aq_ret)
                                        break;
                                        break;
-                               memset(add_list, 0, sizeof(*add_list));
+                               memset(add_list, 0, add_list_size);
                        }
                        /* Entries from tmp_add_list were cloned from MAC
                         * filter list, hence clean those cloned entries
                        }
                        /* Entries from tmp_add_list were cloned from MAC
                         * filter list, hence clean those cloned entries
@@ -2160,18 +2091,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                }
 
                if (num_add) {
                }
 
                if (num_add) {
-                       ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-                                                 add_list, num_add, NULL);
+                       aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                    add_list, num_add, NULL);
                        aq_err = pf->hw.aq.asq_last_status;
                        num_add = 0;
                }
                kfree(add_list);
                add_list = NULL;
 
                        aq_err = pf->hw.aq.asq_last_status;
                        num_add = 0;
                }
                kfree(add_list);
                add_list = NULL;
 
-               if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
+               if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
+                       retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
                        dev_info(&pf->pdev->dev,
                                 "add filter failed, err %s aq_err %s\n",
                        dev_info(&pf->pdev->dev,
                                 "add filter failed, err %s aq_err %s\n",
-                                i40e_stat_str(&pf->hw, ret),
+                                i40e_stat_str(&pf->hw, aq_ret),
                                 i40e_aq_str(&pf->hw, aq_err));
                        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
                            !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                 i40e_aq_str(&pf->hw, aq_err));
                        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
                            !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
@@ -2184,21 +2116,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                }
        }
 
                }
        }
 
+       /* if the VF is not trusted do not do promisc */
+       if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
+               clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+               goto out;
+       }
+
        /* check for changes in promiscuous modes */
        if (changed_flags & IFF_ALLMULTI) {
                bool cur_multipromisc;
 
                cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
        /* check for changes in promiscuous modes */
        if (changed_flags & IFF_ALLMULTI) {
                bool cur_multipromisc;
 
                cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
-               ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+               aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
                                                               vsi->seid,
                                                               cur_multipromisc,
                                                               NULL);
                                                               vsi->seid,
                                                               cur_multipromisc,
                                                               NULL);
-               if (ret)
+               if (aq_ret) {
+                       retval = i40e_aq_rc_to_posix(aq_ret,
+                                                    pf->hw.aq.asq_last_status);
                        dev_info(&pf->pdev->dev,
                                 "set multi promisc failed, err %s aq_err %s\n",
                        dev_info(&pf->pdev->dev,
                                 "set multi promisc failed, err %s aq_err %s\n",
-                                i40e_stat_str(&pf->hw, ret),
+                                i40e_stat_str(&pf->hw, aq_ret),
                                 i40e_aq_str(&pf->hw,
                                              pf->hw.aq.asq_last_status));
                                 i40e_aq_str(&pf->hw,
                                              pf->hw.aq.asq_last_status));
+               }
        }
        if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
                bool cur_promisc;
        }
        if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
                bool cur_promisc;
@@ -2206,7 +2147,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
                               test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                        &vsi->state));
                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
                               test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                        &vsi->state));
-               if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
+               if ((vsi->type == I40E_VSI_MAIN) &&
+                   (pf->lan_veb != I40E_NO_VEB) &&
+                   !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
                        /*  set defport ON for Main VSI instead of true promisc
                         *  this way we will get all unicast/multicast and vlan
                         *  promisc behavior but will not get VF or VMDq traffic
                        /*  set defport ON for Main VSI instead of true promisc
                         *  this way we will get all unicast/multicast and vlan
                         *  promisc behavior but will not get VF or VMDq traffic
@@ -2217,41 +2160,55 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
                                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                        }
                } else {
                                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                        }
                } else {
-                       ret = i40e_aq_set_vsi_unicast_promiscuous(
+                       aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
                                                             &vsi->back->hw,
                                                             vsi->seid,
                                                             &vsi->back->hw,
                                                             vsi->seid,
-                                                            cur_promisc, NULL);
-                       if (ret)
+                                                            cur_promisc, NULL,
+                                                            true);
+                       if (aq_ret) {
+                               retval = i40e_aq_rc_to_posix(aq_ret,
+                                                    pf->hw.aq.asq_last_status);
                                dev_info(&pf->pdev->dev,
                                         "set unicast promisc failed, err %s, aq_err %s\n",
                                dev_info(&pf->pdev->dev,
                                         "set unicast promisc failed, err %s, aq_err %s\n",
-                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_stat_str(&pf->hw, aq_ret),
                                         i40e_aq_str(&pf->hw,
                                         pf->hw.aq.asq_last_status));
                                         i40e_aq_str(&pf->hw,
                                         pf->hw.aq.asq_last_status));
-                       ret = i40e_aq_set_vsi_multicast_promiscuous(
+                       }
+                       aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
                                                                 &vsi->back->hw,
                                                                 vsi->seid,
                                                                 cur_promisc,
                                                                 NULL);
                                                                 &vsi->back->hw,
                                                                 vsi->seid,
                                                                 cur_promisc,
                                                                 NULL);
-                       if (ret)
+                       if (aq_ret) {
+                               retval = i40e_aq_rc_to_posix(aq_ret,
+                                                    pf->hw.aq.asq_last_status);
                                dev_info(&pf->pdev->dev,
                                         "set multicast promisc failed, err %s, aq_err %s\n",
                                dev_info(&pf->pdev->dev,
                                         "set multicast promisc failed, err %s, aq_err %s\n",
-                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_stat_str(&pf->hw, aq_ret),
                                         i40e_aq_str(&pf->hw,
                                         pf->hw.aq.asq_last_status));
                                         i40e_aq_str(&pf->hw,
                                         pf->hw.aq.asq_last_status));
+                       }
                }
                }
-               ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+               aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
                                                   vsi->seid,
                                                   cur_promisc, NULL);
                                                   vsi->seid,
                                                   cur_promisc, NULL);
-               if (ret)
+               if (aq_ret) {
+                       retval = i40e_aq_rc_to_posix(aq_ret,
+                                                    pf->hw.aq.asq_last_status);
                        dev_info(&pf->pdev->dev,
                                 "set brdcast promisc failed, err %s, aq_err %s\n",
                        dev_info(&pf->pdev->dev,
                                 "set brdcast promisc failed, err %s, aq_err %s\n",
-                                i40e_stat_str(&pf->hw, ret),
+                                i40e_stat_str(&pf->hw, aq_ret),
                                 i40e_aq_str(&pf->hw,
                                              pf->hw.aq.asq_last_status));
                                 i40e_aq_str(&pf->hw,
                                              pf->hw.aq.asq_last_status));
+               }
        }
        }
+out:
+       /* if something went wrong then set the changed flag so we try again */
+       if (retval)
+               vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
 
        clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
 
        clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
-       return 0;
+       return retval;
 }
 
 /**
 }
 
 /**
@@ -2270,7 +2227,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v] &&
                    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v] &&
                    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
-                       int ret = i40e_sync_vsi_filters(pf->vsi[v], true);
+                       int ret = i40e_sync_vsi_filters(pf->vsi[v]);
                        if (ret) {
                                /* come back and try again later */
                                pf->flags |= I40E_FLAG_FILTER_SYNC;
                        if (ret) {
                                /* come back and try again later */
                                pf->flags |= I40E_FLAG_FILTER_SYNC;
@@ -2517,16 +2474,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
                }
        }
 
                }
        }
 
-       /* Make sure to release before sync_vsi_filter because that
-        * function will lock/unlock as necessary
-        */
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
-       if (test_bit(__I40E_DOWN, &vsi->back->state) ||
-           test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
-               return 0;
-
-       return i40e_sync_vsi_filters(vsi, false);
+       /* schedule our worker thread which will take care of
+        * applying the new filter changes
+        */
+       i40e_service_event_schedule(vsi->back);
+       return 0;
 }
 
 /**
 }
 
 /**
@@ -2599,16 +2553,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
                }
        }
 
                }
        }
 
-       /* Make sure to release before sync_vsi_filter because that
-        * function with lock/unlock as necessary
-        */
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
-       if (test_bit(__I40E_DOWN, &vsi->back->state) ||
-           test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
-               return 0;
-
-       return i40e_sync_vsi_filters(vsi, false);
+       /* schedule our worker thread which will take care of
+        * applying the new filter changes
+        */
+       i40e_service_event_schedule(vsi->back);
+       return 0;
 }
 
 /**
 }
 
 /**
@@ -2644,7 +2595,9 @@ static void i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
+#ifndef HAVE_VLAN_RX_REGISTER
        int ret = 0;
        int ret = 0;
+#endif
 
        if (vid > 4095)
 #ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
 
        if (vid > 4095)
 #ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
@@ -2662,7 +2615,10 @@ static void i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
         * extra filter for vlan 0 tagged packets.
         */
        if (vid)
         * extra filter for vlan 0 tagged packets.
         */
        if (vid)
-               ret = i40e_vsi_add_vlan(vsi, vid);
+#ifndef HAVE_VLAN_RX_REGISTER
+               ret =
+#endif
+               i40e_vsi_add_vlan(vsi, vid);
 
 #ifndef HAVE_VLAN_RX_REGISTER
        if (!ret && (vid < VLAN_N_VID))
 
 #ifndef HAVE_VLAN_RX_REGISTER
        if (!ret && (vid < VLAN_N_VID))
@@ -2826,13 +2782,11 @@ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
 
        vsi->info.pvid = 0;
 }
 
        vsi->info.pvid = 0;
 }
-#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
 
 /**
  * i40e_add_del_cloud_filter - Add/del cloud filter
  * @pf: pointer to the physical function struct
  * @filter: cloud filter rule
 
 /**
  * i40e_add_del_cloud_filter - Add/del cloud filter
  * @pf: pointer to the physical function struct
  * @filter: cloud filter rule
- * @vsi: pointer to the destination vsi
  * @add: if true, add, if false, delete
  *
  * Add or delete a cloud filter for a specific flow spec.
  * @add: if true, add, if false, delete
  *
  * Add or delete a cloud filter for a specific flow spec.
@@ -2840,7 +2794,7 @@ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
  **/
 int i40e_add_del_cloud_filter(struct i40e_pf *pf,
                              struct i40e_cloud_filter *filter,
  **/
 int i40e_add_del_cloud_filter(struct i40e_pf *pf,
                              struct i40e_cloud_filter *filter,
-                             struct i40e_vsi *vsi, bool add)
+                             bool add)
 {
        struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
        u32 ipaddr;
 {
        struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
        u32 ipaddr;
@@ -2862,12 +2816,6 @@ int i40e_add_del_cloud_filter(struct i40e_pf *pf,
                        I40E_AQC_ADD_CLOUD_FILTER_IIP,
        };
 
                        I40E_AQC_ADD_CLOUD_FILTER_IIP,
        };
 
-       if (vsi == NULL)
-               return I40E_ERR_BAD_PTR;
-
-       if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_XVLAN)
-               return I40E_ERR_NOT_IMPLEMENTED;
-
        if ((filter->flags >= ARRAY_SIZE(flag_table)) ||
            (flag_table[filter->flags] == 0))
                return I40E_ERR_CONFIG;
        if ((filter->flags >= ARRAY_SIZE(flag_table)) ||
            (flag_table[filter->flags] == 0))
                return I40E_ERR_CONFIG;
@@ -2879,37 +2827,44 @@ int i40e_add_del_cloud_filter(struct i40e_pf *pf,
        /* the low index of data storing IP address indicate the last
         * byte on wire.
         */
        /* the low index of data storing IP address indicate the last
         * byte on wire.
         */
-       ipaddr = ntohl(filter->inner_ip[0]);
+       ipaddr = be32_to_cpu(filter->inner_ip[0]);
        memcpy(&cld_filter.ipaddr.v4.data, &ipaddr, 4);
        cld_filter.inner_vlan = cpu_to_le16(ntohs(filter->inner_vlan));
        cld_filter.tenant_id = cpu_to_le32(filter->tenant_id);
        memcpy(&cld_filter.ipaddr.v4.data, &ipaddr, 4);
        cld_filter.inner_vlan = cpu_to_le16(ntohs(filter->inner_vlan));
        cld_filter.tenant_id = cpu_to_le32(filter->tenant_id);
-       cld_filter.queue_number = cpu_to_le16(filter->queue_id);
 
 
-       /* Only supports VXLAN tunnel for now */
-       cld_filter.flags = cpu_to_le16(
-                               I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN <<
-                               I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
+       if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
+               cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
+                                            I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
 
 
-       if (filter->flags != I40E_CLOUD_FILTER_FLAGS_OMAC)
+       if (filter->flags != I40E_CLOUD_FILTER_FLAGS_OMAC) {
                cld_filter.flags |=
                        cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE);
                cld_filter.flags |=
                        cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE);
+               cld_filter.queue_number = cpu_to_le16(filter->queue_id);
+       }
 
        cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
 
        cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
-                       I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
+                                       I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
 
 
-       if (add)
-               ret = i40e_aq_add_cloud_filters(&pf->hw, vsi->seid,
-                                               &cld_filter, 1);
-       else
-               ret = i40e_aq_remove_cloud_filters(&pf->hw, vsi->seid,
+       if (add) {
+               ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
                                                &cld_filter, 1);
                                                &cld_filter, 1);
+       } else {
+               ret = i40e_aq_remove_cloud_filters(&pf->hw, filter->seid,
+                                                  &cld_filter, 1);
+               if (ret && pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+                       /* ignore error on delete of non-existent filter */
+                       ret = 0;
+                       pf->hw.aq.asq_last_status = 0;
+               }
+       }
+
        if (ret)
                dev_err(&pf->pdev->dev,
        if (ret)
                dev_err(&pf->pdev->dev,
-                       "fail to %s cloud filter, err %d aq_err %d\n",
-                       add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
+                       "fail to %s cloud filter, err %s aq_err %s\n",
+                       add ? "add" : "delete", i40e_stat_str(&pf->hw, ret),
+                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        return ret;
 }
        return ret;
 }
-#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
 
 /**
  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 
 /**
  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
@@ -3111,8 +3066,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
        wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
        i40e_flush(hw);
 
        wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
        i40e_flush(hw);
 
-       clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
-
        /* cache tail off for easier writes later */
        ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
 
        /* cache tail off for easier writes later */
        ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
 
@@ -3140,34 +3093,21 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        memset(&rx_ctx, 0, sizeof(rx_ctx));
 
        ring->rx_buf_len = vsi->rx_buf_len;
        memset(&rx_ctx, 0, sizeof(rx_ctx));
 
        ring->rx_buf_len = vsi->rx_buf_len;
-       ring->rx_hdr_len = vsi->rx_hdr_len;
 
        rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
 
        rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
-       rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
 
        rx_ctx.base = (ring->dma / 128);
        rx_ctx.qlen = ring->count;
 
 
        rx_ctx.base = (ring->dma / 128);
        rx_ctx.qlen = ring->count;
 
-       if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
-               set_ring_16byte_desc_enabled(ring);
-               rx_ctx.dsize = 0;
-       } else {
-               rx_ctx.dsize = 1;
-       }
+       /* use 32 byte descriptors */
+       rx_ctx.dsize = 1;
 
 
-       rx_ctx.dtype = vsi->dtype;
-       if (vsi->dtype) {
-               set_ring_ps_enabled(ring);
-               rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
-                                 I40E_RX_SPLIT_IP      |
-                                 I40E_RX_SPLIT_TCP_UDP |
-                                 I40E_RX_SPLIT_SCTP;
-       } else {
-               rx_ctx.hsplit_0 = 0;
-       }
+       /* descriptor type is always zero
+        * rx_ctx.dtype = 0;
+        */
+       rx_ctx.hsplit_0 = 0;
 
 
-       rx_ctx.rxmax = min_t(u16, vsi->max_frame,
-                                 (chain_len * ring->rx_buf_len));
+       rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
        rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = 1;
        rx_ctx.l2tsel = 1;
        rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = 1;
        rx_ctx.l2tsel = 1;
@@ -3201,12 +3141,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
        writel(0, ring->tail);
 
        ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
        writel(0, ring->tail);
 
-       if (ring_is_ps_enabled(ring)) {
-               i40e_alloc_rx_headers(ring);
-               i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
-       } else {
-               i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
-       }
+       i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
 
        return 0;
 }
 
        return 0;
 }
@@ -3245,40 +3180,18 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
        else
                vsi->max_frame = I40E_RXBUFFER_2048;
 
        else
                vsi->max_frame = I40E_RXBUFFER_2048;
 
-       /* figure out correct receive buffer length */
-       switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
-                                   I40E_FLAG_RX_PS_ENABLED)) {
-       case I40E_FLAG_RX_1BUF_ENABLED:
-               vsi->rx_hdr_len = 0;
-               vsi->rx_buf_len = vsi->max_frame;
-               vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
-               break;
-       case I40E_FLAG_RX_PS_ENABLED:
-               vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
-               vsi->rx_buf_len = I40E_RXBUFFER_2048;
-               vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
-               break;
-       default:
-               vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
-               vsi->rx_buf_len = I40E_RXBUFFER_2048;
-               vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
-               break;
-       }
+       vsi->rx_buf_len = I40E_RXBUFFER_2048;
 
 #ifdef I40E_FCOE
        /* setup rx buffer for FCoE */
        if ((vsi->type == I40E_VSI_FCOE) &&
            (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
 
 #ifdef I40E_FCOE
        /* setup rx buffer for FCoE */
        if ((vsi->type == I40E_VSI_FCOE) &&
            (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
-               vsi->rx_hdr_len = 0;
                vsi->rx_buf_len = I40E_RXBUFFER_3072;
                vsi->max_frame = I40E_RXBUFFER_3072;
                vsi->rx_buf_len = I40E_RXBUFFER_3072;
                vsi->max_frame = I40E_RXBUFFER_3072;
-               vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
        }
 
 #endif /* I40E_FCOE */
        /* round up for the chip's needs */
        }
 
 #endif /* I40E_FCOE */
        /* round up for the chip's needs */
-       vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
-                               BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
        vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
                                BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
        vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
                                BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
@@ -3356,6 +3269,23 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
        }
 }
 
        }
 }
 
+/**
+ * i40e_cloud_filter_restore - Restore the switch's cloud filters
+ * @pf: Pointer to the targeted VSI
+ *
+ * This function replays the cloud filter hlist into the hw switch
+ **/
+static void i40e_cloud_filter_restore(struct i40e_pf *pf)
+{
+       struct i40e_cloud_filter *filter;
+       struct hlist_node *node;
+
+       hlist_for_each_entry_safe(filter, node,
+                                 &pf->cloud_filter_list, cloud_node) {
+               i40e_add_del_cloud_filter(pf, filter, true);
+       }
+}
+
 /**
  * i40e_vsi_configure - Set up the VSI for action
  * @vsi: the VSI being configured
 /**
  * i40e_vsi_configure - Set up the VSI for action
  * @vsi: the VSI being configured
@@ -3534,36 +3464,21 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
 /**
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
 /**
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
+ * @clearpba: true when all pending interrupt events should be cleared
  **/
  **/
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
 {
        struct i40e_hw *hw = &pf->hw;
        u32 val;
 
        val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
 {
        struct i40e_hw *hw = &pf->hw;
        u32 val;
 
        val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
-             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+             (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
              (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
 
        wr32(hw, I40E_PFINT_DYN_CTL0, val);
        i40e_flush(hw);
 }
 
              (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
 
        wr32(hw, I40E_PFINT_DYN_CTL0, val);
        i40e_flush(hw);
 }
 
-/**
- * i40e_irq_dynamic_disable - Disable default interrupt generation settings
- * @vsi: pointer to a vsi
- * @vector: disable a particular Hw Interrupt vector
- **/
-void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
-{
-       struct i40e_pf *pf = vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       u32 val;
-
-       val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
-       wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-       i40e_flush(hw);
-}
-
 /**
  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
  * @irq: interrupt number
 /**
  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
  * @irq: interrupt number
@@ -3658,11 +3573,20 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
        int base = vsi->base_vector;
        int i;
 
        int base = vsi->base_vector;
        int i;
 
+       /* disable interrupt causation from each queue */
        for (i = 0; i < vsi->num_queue_pairs; i++) {
        for (i = 0; i < vsi->num_queue_pairs; i++) {
-               wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
-               wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
+               u32 val;
+
+               val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+               val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+               wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+               val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
+               val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+               wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
        }
 
        }
 
+       /* disable each interrupt */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                for (i = vsi->base_vector;
                     i < (vsi->num_q_vectors + vsi->base_vector); i++)
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                for (i = vsi->base_vector;
                     i < (vsi->num_q_vectors + vsi->base_vector); i++)
@@ -3693,7 +3617,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
                for (i = 0; i < vsi->num_q_vectors; i++)
                        i40e_irq_dynamic_enable(vsi, i);
        } else {
                for (i = 0; i < vsi->num_q_vectors; i++)
                        i40e_irq_dynamic_enable(vsi, i);
        } else {
-               i40e_irq_dynamic_enable_icr0(pf);
+               i40e_irq_dynamic_enable_icr0(pf, true);
        }
 
        i40e_flush(&pf->hw);
        }
 
        i40e_flush(&pf->hw);
@@ -3742,17 +3666,12 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
        /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
        if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
 
        /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
        if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
-
-               /* temporarily disable queue cause for NAPI processing */
-               u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-
-               qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-               wr32(hw, I40E_QINT_RQCTL(0), qval);
-
-               qval = rd32(hw, I40E_QINT_TQCTL(0));
-               qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-               wr32(hw, I40E_QINT_TQCTL(0), qval);
-
+               /* We do not have a way to disarm Queue causes while leaving
+                * interrupt enabled for all other causes, ideally
+                * interrupt should be disabled while we are in NAPI but
+                * this is not a performance path and napi_schedule()
+                * can deal with rescheduling.
+                */
                if (!test_bit(__I40E_DOWN, &pf->state))
                        napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
        }
                if (!test_bit(__I40E_DOWN, &pf->state))
                        napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
        }
@@ -3760,6 +3679,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
                ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
                set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
                ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
                set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+               i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
        }
 
        if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
        }
 
        if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
@@ -3832,7 +3752,7 @@ enable_intr:
        wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
        if (!test_bit(__I40E_DOWN, &pf->state)) {
                i40e_service_event_schedule(pf);
        wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
        if (!test_bit(__I40E_DOWN, &pf->state)) {
                i40e_service_event_schedule(pf);
-               i40e_irq_dynamic_enable_icr0(pf);
+               i40e_irq_dynamic_enable_icr0(pf, false);
        }
 
        return ret;
        }
 
        return ret;
@@ -4036,7 +3956,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /**
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /**
- * i40e_netpoll - A Polling 'interrupt'handler
+ * i40e_netpoll - A Polling 'interrupt' handler
  * @netdev: network interface device structure
  *
  * This is used by netconsole to send skbs without having to re-enable
  * @netdev: network interface device structure
  *
  * This is used by netconsole to send skbs without having to re-enable
@@ -4215,6 +4135,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
                else
                        rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
                wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
                else
                        rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
                wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+               /* No waiting for the Tx queue to disable */
+               if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
+                       continue;
 
                /* wait for the change to finish */
                ret = i40e_pf_rxq_wait(pf, pf_q, enable);
 
                /* wait for the change to finish */
                ret = i40e_pf_rxq_wait(pf, pf_q, enable);
@@ -4286,6 +4209,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
                        irq_set_affinity_hint(pf->msix_entries[vector].vector,
                                              NULL);
 #endif
                        irq_set_affinity_hint(pf->msix_entries[vector].vector,
                                              NULL);
 #endif
+                       synchronize_irq(pf->msix_entries[vector].vector);
                        free_irq(pf->msix_entries[vector].vector,
                                 vsi->q_vectors[i]);
 
                        free_irq(pf->msix_entries[vector].vector,
                                 vsi->q_vectors[i]);
 
@@ -4395,10 +4319,8 @@ static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
                ring->q_vector = NULL;
 
        /* only VSI w/ an associated netdev is set up w/ NAPI */
                ring->q_vector = NULL;
 
        /* only VSI w/ an associated netdev is set up w/ NAPI */
-       if (vsi->netdev) {
-               napi_hash_del(&q_vector->napi);
+       if (vsi->netdev)
                netif_napi_del(&q_vector->napi);
                netif_napi_del(&q_vector->napi);
-       }
 
        vsi->q_vectors[v_idx] = NULL;
 
 
        vsi->q_vectors[v_idx] = NULL;
 
@@ -4451,7 +4373,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
        int i;
 
        i40e_stop_misc_vector(pf);
        int i;
 
        i40e_stop_misc_vector(pf);
-       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
                synchronize_irq(pf->msix_entries[0].vector);
                free_irq(pf->msix_entries[0].vector, pf);
        }
                synchronize_irq(pf->msix_entries[0].vector);
                free_irq(pf->msix_entries[0].vector, pf);
        }
@@ -4474,10 +4396,8 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
        if (!vsi->netdev)
                return;
 
        if (!vsi->netdev)
                return;
 
-       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
-               i40e_qv_init_lock(vsi->q_vectors[q_idx]);
+       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
                napi_enable(&vsi->q_vectors[q_idx]->napi);
                napi_enable(&vsi->q_vectors[q_idx]->napi);
-       }
 }
 
 /**
 }
 
 /**
@@ -4486,20 +4406,13 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
  **/
 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
 {
  **/
 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
 {
-       int limiter = 20;
        int q_idx;
 
        if (!vsi->netdev)
                return;
 
        int q_idx;
 
        if (!vsi->netdev)
                return;
 
-       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
                napi_disable(&vsi->q_vectors[q_idx]->napi);
                napi_disable(&vsi->q_vectors[q_idx]->napi);
-               while (!i40e_qv_disable(vsi->q_vectors[q_idx]) && limiter--)
-                       usleep_range(1000, 2000);
-               if (!limiter)
-                       dev_info(&vsi->back->pdev->dev,
-                                "QV %d locked\n", q_idx);
-       }
 }
 
 /**
 }
 
 /**
@@ -4594,12 +4507,12 @@ void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
 
 #ifdef CONFIG_DCB
 /**
 
 #ifdef CONFIG_DCB
 /**
- * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
+ * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
  * @vsi: the VSI being configured
  *
  * @vsi: the VSI being configured
  *
- * This function waits for the given VSI's Tx queues to be disabled.
+ * This function waits for the given VSI's queues to be disabled.
 **/
 **/
-static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
+static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
        int i, pf_q, ret;
 {
        struct i40e_pf *pf = vsi->back;
        int i, pf_q, ret;
@@ -4616,24 +4529,36 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
                }
        }
 
                }
        }
 
+       pf_q = vsi->base_queue;
+       for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+               /* Check and wait for the disable status of the queue */
+               ret = i40e_pf_rxq_wait(pf, pf_q, false);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "VSI seid %d Rx ring %d disable timeout\n",
+                                vsi->seid, pf_q);
+                       return ret;
+               }
+       }
+
        return 0;
 }
 
 /**
        return 0;
 }
 
 /**
- * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
+ * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
  * @pf: the PF
  *
  * @pf: the PF
  *
- * This function waits for the Tx queues to be in disabled state for all the
+ * This function waits for the queues to be in disabled state for all the
  * VSIs that are managed by this PF.
  **/
  * VSIs that are managed by this PF.
  **/
-static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
+static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
 {
        int v, ret = 0;
 
        for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
                /* No need to wait for FCoE VSI queues */
                if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
 {
        int v, ret = 0;
 
        for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
                /* No need to wait for FCoE VSI queues */
                if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
-                       ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
+                       ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
                        if (ret)
                                break;
                }
                        if (ret)
                                break;
                }
@@ -4664,7 +4589,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
                if (app.selector == I40E_APP_SEL_TCPIP &&
                    app.protocolid == I40E_APP_PROTOID_ISCSI) {
                        tc = dcbcfg->etscfg.prioritytable[app.priority];
                if (app.selector == I40E_APP_SEL_TCPIP &&
                    app.protocolid == I40E_APP_PROTOID_ISCSI) {
                        tc = dcbcfg->etscfg.prioritytable[app.priority];
-                       enabled_tc |= BIT_ULL(tc);
+                       enabled_tc |= BIT(tc);
                        break;
                }
        }
                        break;
                }
        }
@@ -4748,7 +4673,7 @@ u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
        /* At least have TC0 */
        enabled_tc = (enabled_tc ? enabled_tc : 0x1);
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
        /* At least have TC0 */
        enabled_tc = (enabled_tc ? enabled_tc : 0x1);
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & BIT_ULL(i))
+               if (enabled_tc & BIT(i))
                        num_tc++;
        }
        return num_tc;
                        num_tc++;
        }
        return num_tc;
@@ -4770,7 +4695,7 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
 
        /* Find the first enabled TC */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 
        /* Find the first enabled TC */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & BIT_ULL(i))
+               if (enabled_tc & BIT(i))
                        break;
        }
 
                        break;
        }
 
@@ -4930,7 +4855,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
                 * will set the numtc for netdev as 2 that will be
                 * referenced by the netdev layer as TC 0 and 1.
                 */
                 * will set the numtc for netdev as 2 that will be
                 * referenced by the netdev layer as TC 0 and 1.
                 */
-               if (vsi->tc_config.enabled_tc & BIT_ULL(i))
+               if (vsi->tc_config.enabled_tc & BIT(i))
                        netdev_set_tc_queue(netdev,
                                        vsi->tc_config.tc_info[i].netdev_tc,
                                        vsi->tc_config.tc_info[i].qcount,
                        netdev_set_tc_queue(netdev,
                                        vsi->tc_config.tc_info[i].netdev_tc,
                                        vsi->tc_config.tc_info[i].qcount,
@@ -4992,7 +4917,7 @@ int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 
        /* Enable ETS TCs with equal BW Share for now across all VSIs */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 
        /* Enable ETS TCs with equal BW Share for now across all VSIs */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & BIT_ULL(i))
+               if (enabled_tc & BIT(i))
                        bw_share[i] = 1;
        }
 
                        bw_share[i] = 1;
        }
 
@@ -5066,7 +4991,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
 
        /* Enable ETS TCs with equal BW Share for now */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 
        /* Enable ETS TCs with equal BW Share for now */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & BIT_ULL(i))
+               if (enabled_tc & BIT(i))
                        bw_data.tc_bw_share_credits[i] = 1;
        }
 
                        bw_data.tc_bw_share_credits[i] = 1;
        }
 
@@ -5199,8 +5124,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
        int err = 0;
 
        /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
        int err = 0;
 
        /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
-       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
-           (pf->hw.aq.fw_maj_ver < 4))
+       if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
                goto out;
 
        /* Get the initial DCB configuration */
                goto out;
 
        /* Get the initial DCB configuration */
@@ -5246,7 +5170,7 @@ out:
  */
 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 {
  */
 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 {
-       char *speed = "Unknown";
+       char *speed = "Unknown ";
        char *fc = "Unknown";
 
        if (vsi->current_isup == isup)
        char *fc = "Unknown";
 
        if (vsi->current_isup == isup)
@@ -5346,7 +5270,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
                                "the driver failed to link because an unqualified module was detected.");
        }
 
                                "the driver failed to link because an unqualified module was detected.");
        }
 
-       /* replay FDIR SB filters */
+       /* replay flow filters */
        if (vsi->type == I40E_VSI_FDIR) {
                /* reset fd counters */
                pf->fd_add_err = pf->fd_atr_cnt = 0;
        if (vsi->type == I40E_VSI_FDIR) {
                /* reset fd counters */
                pf->fd_add_err = pf->fd_atr_cnt = 0;
@@ -5357,6 +5281,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
                        pf->fd_tcp_rule = 0;
                }
                i40e_fdir_filter_restore(vsi);
                        pf->fd_tcp_rule = 0;
                }
                i40e_fdir_filter_restore(vsi);
+               i40e_cloud_filter_restore(pf);
        }
        i40e_service_event_schedule(pf);
 
        }
        i40e_service_event_schedule(pf);
 
@@ -5469,7 +5394,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
 
        /* Generate TC map for number of tc requested */
        for (i = 0; i < tc; i++)
 
        /* Generate TC map for number of tc requested */
        for (i = 0; i < tc; i++)
-               enabled_tc |= BIT_ULL(i);
+               enabled_tc |= BIT(i);
 
        /* Requesting same TC configuration as already enabled */
        if (enabled_tc == vsi->tc_config.enabled_tc)
 
        /* Requesting same TC configuration as already enabled */
        if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -5492,6 +5417,21 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
 exit:
        return ret;
 }
 exit:
        return ret;
 }
+
+#ifdef NETIF_F_HW_TC
+#ifdef I40E_FCOE
+int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+                   struct tc_to_netdev *tc)
+#else
+static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+                          struct tc_to_netdev *tc)
+#endif
+{
+       if (tc->type != TC_SETUP_MQPRIO)
+               return -EINVAL;
+       return i40e_setup_tc(netdev, tc->tc);
+}
+#endif /* NETIF_F_HW_TC */
 #endif /* HAVE_SETUP_TC */
 
 /**
 #endif /* HAVE_SETUP_TC */
 
 /**
@@ -5533,13 +5473,21 @@ int i40e_open(struct net_device *netdev)
 
 #ifdef HAVE_VXLAN_RX_OFFLOAD
 #ifdef HAVE_VXLAN_CHECKS
 
 #ifdef HAVE_VXLAN_RX_OFFLOAD
 #ifdef HAVE_VXLAN_CHECKS
+#if IS_ENABLED(CONFIG_VXLAN)
        vxlan_get_rx_port(netdev);
        vxlan_get_rx_port(netdev);
+#endif
 #else
 #if IS_ENABLED(CONFIG_VXLAN)
        vxlan_get_rx_port(netdev);
 #endif
 #endif /* HAVE_VXLAN_CHECKS */
 #endif /* HAVE_VXLAN_RX_OFFLOAD */
 #else
 #if IS_ENABLED(CONFIG_VXLAN)
        vxlan_get_rx_port(netdev);
 #endif
 #endif /* HAVE_VXLAN_CHECKS */
 #endif /* HAVE_VXLAN_RX_OFFLOAD */
+#ifdef HAVE_GENEVE_RX_OFFLOAD
+#if IS_ENABLED(CONFIG_GENEVE)
+       if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)
+               geneve_get_rx_port(netdev);
+#endif
+#endif
 
        return 0;
 }
 
        return 0;
 }
@@ -5617,23 +5565,71 @@ err_setup_tx:
 }
 
 /**
 }
 
 /**
- * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
+ * i40e_cleanup_flex_filter - Cleans up the flex filter state and registers
  * @pf: Pointer to PF
  *
  * @pf: Pointer to PF
  *
- * This function destroys the hlist where all the Flow Director
- * filters were saved.
+ * This function programs relevant register to good known initial state
+ * which were programmed as part of adding flow director flex filter
  **/
  **/
-static void i40e_fdir_filter_exit(struct i40e_pf *pf)
+static void i40e_cleanup_flex_filter(struct i40e_pf *pf)
 {
 {
-       struct i40e_fdir_filter *filter;
-       struct hlist_node *node2;
+       int i;
 
 
-       hlist_for_each_entry_safe(filter, node2,
-                                 &pf->fdir_filter_list, fdir_node) {
-               hlist_del(&filter->fdir_node);
+       if (unlikely(!pf))
+               return;
+
+       /* Reset flow specific flex registers to default state */
+       for (i = 0; i < I40E_MAX_FLEX_PIT_REG; i++)
+               i40e_write_rx_ctl(&pf->hw, I40E_PRTQF_FLX_PIT(i), 0);
+
+       /* Reset flow specific input set register to default state */
+       if (pf->fd_tcp4_input_set)
+               i40e_write_fd_input_set(pf,
+                                       I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+                                       pf->fd_tcp4_input_set);
+       if (pf->fd_udp4_input_set)
+               i40e_write_fd_input_set(pf,
+                                       I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+                                       pf->fd_udp4_input_set);
+       if (pf->fd_sctp4_input_set)
+               i40e_write_fd_input_set(pf,
+                                       I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+                                       pf->fd_sctp4_input_set);
+       if (pf->fd_ip4_input_set)
+               i40e_write_fd_input_set(pf,
+                                       I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+                                       pf->fd_ip4_input_set);
+       pf->fd_tcp4_input_set = 0;
+       pf->fd_udp4_input_set = 0;
+       pf->fd_sctp4_input_set = 0;
+       pf->fd_ip4_input_set = 0;
+       pf->fd_tcp4_filter_cnt = 0;
+       pf->fd_udp4_filter_cnt = 0;
+       pf->fd_sctp4_filter_cnt = 0;
+       pf->fd_ip4_filter_cnt = 0;
+       pf->fd_flex_filter_cnt = 0;
+}
+
+/**
+ * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
+ * @pf: Pointer to PF
+ *
+ * This function destroys the hlist where all the Flow Director
+ * filters were saved.
+ **/
+static void i40e_fdir_filter_exit(struct i40e_pf *pf)
+{
+       struct i40e_fdir_filter *filter;
+       struct hlist_node *node2;
+
+       hlist_for_each_entry_safe(filter, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               hlist_del(&filter->fdir_node);
                kfree(filter);
        }
                kfree(filter);
        }
-       pf->fdir_pf_active_filters = 0;
+
+       /* Cleanup state related to flow director flex filter */
+       i40e_cleanup_flex_filter(pf);
 }
 
 /**
 }
 
 /**
@@ -5646,11 +5642,7 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
  *
  * Returns 0, this is not allowed to fail
  **/
  *
  * Returns 0, this is not allowed to fail
  **/
-#ifdef I40E_FCOE
 int i40e_close(struct net_device *netdev)
 int i40e_close(struct net_device *netdev)
-#else
-static int i40e_close(struct net_device *netdev)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -5675,8 +5667,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 
        WARN_ON(in_interrupt());
 
 
        WARN_ON(in_interrupt());
 
-       if (i40e_check_asq_alive(&pf->hw))
-               i40e_vc_notify_reset(pf);
 
        /* do the biggest reset indicated */
        if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
 
        /* do the biggest reset indicated */
        if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -5915,8 +5905,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        if (ret)
                goto exit;
 
        if (ret)
                goto exit;
 
-       /* Wait for the PF's Tx queues to be disabled */
-       ret = i40e_pf_wait_txq_disabled(pf);
+       /* Wait for the PF's queues to be disabled */
+       ret = i40e_pf_wait_queues_disabled(pf);
        if (ret) {
                /* Schedule PF reset to recover */
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
        if (ret) {
                /* Schedule PF reset to recover */
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
@@ -5972,7 +5962,7 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
  **/
 static void i40e_service_event_complete(struct i40e_pf *pf)
 {
  **/
 static void i40e_service_event_complete(struct i40e_pf *pf)
 {
-       BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
+       WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
 
        /* flush memory to make sure state is correct before next watchdog */
        smp_mb__before_atomic();
 
        /* flush memory to make sure state is correct before next watchdog */
        smp_mb__before_atomic();
@@ -6070,6 +6060,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
                                pf->fdir_pf_active_filters--;
                        }
                }
                                pf->fdir_pf_active_filters--;
                        }
                }
+               /* Cleanup state related to flow director flex filter */
+               i40e_cleanup_flex_filter(pf);
        }
 }
 
        }
 }
 
@@ -6245,6 +6237,9 @@ static void i40e_link_event(struct i40e_pf *pf)
        i40e_status status;
        bool new_link, old_link;
 
        i40e_status status;
        bool new_link, old_link;
 
+       /* save off old link status information */
+       pf->hw.phy.link_info_old = pf->hw.phy.link_info;
+
        /* set this to force the get_link_status call to refresh state */
        pf->hw.phy.get_link_info = true;
 
        /* set this to force the get_link_status call to refresh state */
        pf->hw.phy.get_link_info = true;
 
@@ -6286,68 +6281,6 @@ static void i40e_link_event(struct i40e_pf *pf)
 #endif /* HAVE_PTP_1588_CLOCK */
 }
 
 #endif /* HAVE_PTP_1588_CLOCK */
 }
 
-/**
- * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
- * @pf: board private structure
- *
- * Set the per-queue flags to request a check for stuck queues in the irq
- * clean functions, then force interrupts to be sure the irq clean is called.
- **/
-static void i40e_check_hang_subtask(struct i40e_pf *pf)
-{
-       int i, v;
-
-       /* If we're down or resetting, just bail */
-       if (test_bit(__I40E_DOWN, &pf->state) ||
-           test_bit(__I40E_CONFIG_BUSY, &pf->state))
-               return;
-
-       /* for each VSI/netdev
-        *     for each Tx queue
-        *         set the check flag
-        *     for each q_vector
-        *         force an interrupt
-        */
-       for (v = 0; v < pf->num_alloc_vsi; v++) {
-               struct i40e_vsi *vsi = pf->vsi[v];
-               int armed = 0;
-
-               if (!pf->vsi[v] ||
-                   test_bit(__I40E_DOWN, &vsi->state) ||
-                   (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
-                       continue;
-
-               for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       set_check_for_tx_hang(vsi->tx_rings[i]);
-                       if (test_bit(__I40E_HANG_CHECK_ARMED,
-                                    &vsi->tx_rings[i]->state))
-                               armed++;
-               }
-
-               if (armed) {
-                       if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
-                               wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
-                                    (I40E_PFINT_DYN_CTL0_INTENA_MASK |
-                                     I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
-                                     I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
-                                     I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
-                                     I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
-                       } else {
-                               u16 vec = vsi->base_vector - 1;
-                               u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
-                                     I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
-                                     I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
-                                     I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
-                                     I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
-                               for (i = 0; i < vsi->num_q_vectors; i++, vec++)
-                                       wr32(&vsi->back->hw,
-                                            I40E_PFINT_DYN_CTLN(vec), val);
-                       }
-                       i40e_flush(&vsi->back->hw);
-               }
-       }
-}
-
 /**
  * i40e_watchdog_subtask - periodic checks not using event driven response
  * @pf: board private structure
 /**
  * i40e_watchdog_subtask - periodic checks not using event driven response
  * @pf: board private structure
@@ -6367,7 +6300,6 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
                return;
        pf->service_timer_previous = jiffies;
 
                return;
        pf->service_timer_previous = jiffies;
 
-       i40e_check_hang_subtask(pf);
        if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
                i40e_link_event(pf);
 
        if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
                i40e_link_event(pf);
 
@@ -6400,23 +6332,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 
        rtnl_lock();
        if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
 
        rtnl_lock();
        if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
-               reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
+               reset_flags |= BIT(__I40E_REINIT_REQUESTED);
                clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
                clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
+               reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
                clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
                clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
+               reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
                clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
                clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
+               reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
-               reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
+               reset_flags |= BIT(__I40E_DOWN_REQUESTED);
                clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
        }
 
                clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
        }
 
@@ -6446,13 +6378,9 @@ unlock:
 static void i40e_handle_link_event(struct i40e_pf *pf,
                                   struct i40e_arq_event_info *e)
 {
 static void i40e_handle_link_event(struct i40e_pf *pf,
                                   struct i40e_arq_event_info *e)
 {
-       struct i40e_hw *hw = &pf->hw;
        struct i40e_aqc_get_link_status *status =
                (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
 
        struct i40e_aqc_get_link_status *status =
                (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
 
-       /* save off old link status information */
-       hw->phy.link_info_old = hw->phy.link_info;
-
        /* Do a new status request to re-enable LSE reporting
         * and load new status information into the hw struct
         * This completely ignores any state information
        /* Do a new status request to re-enable LSE reporting
         * and load new status information into the hw struct
         * This completely ignores any state information
@@ -6491,15 +6419,19 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        val = rd32(&pf->hw, pf->hw.aq.arq.len);
        oldval = val;
        if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
        val = rd32(&pf->hw, pf->hw.aq.arq.len);
        oldval = val;
        if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
-               dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+               if (hw->debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
                val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
        }
        if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
                val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
        }
        if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
-               dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+               if (hw->debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
                val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
                val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+               pf->arq_overflows++;
        }
        if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
        }
        if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
-               dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+               if (hw->debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
                val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
        }
        if (oldval != val)
                val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
        }
        if (oldval != val)
@@ -6508,15 +6440,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        val = rd32(&pf->hw, pf->hw.aq.asq.len);
        oldval = val;
        if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
        val = rd32(&pf->hw, pf->hw.aq.asq.len);
        oldval = val;
        if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
-               dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+               if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
                val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
        }
        if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
                val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
        }
        if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
-               dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+               if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
                val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
        }
        if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
                val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
        }
        if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
-               dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+               if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
                val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
        }
        if (oldval != val)
                val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
        }
        if (oldval != val)
@@ -6567,11 +6502,14 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
                        break;
                case i40e_aqc_opc_nvm_erase:
                case i40e_aqc_opc_nvm_update:
                        break;
                case i40e_aqc_opc_nvm_erase:
                case i40e_aqc_opc_nvm_update:
-                       i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
+               case i40e_aqc_opc_oem_post_update:
+                       i40e_debug(&pf->hw, I40E_DEBUG_NVM,
+                                  "ARQ NVM operation 0x%04x completed\n",
+                                  opcode);
                        break;
                default:
                        dev_info(&pf->pdev->dev,
                        break;
                default:
                        dev_info(&pf->pdev->dev,
-                                "ARQ Error: Unknown event 0x%04x received\n",
+                                "ARQ: Unknown event 0x%04x ignored\n",
                                 opcode);
                        break;
                }
                                 opcode);
                        break;
                }
@@ -6922,6 +6860,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
        clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
        if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
                return;
        clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
        if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
                return;
+       if (i40e_check_asq_alive(&pf->hw))
+               i40e_vc_notify_reset(pf);
 
        dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
 
        dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
@@ -6970,7 +6910,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        struct i40e_hw *hw = &pf->hw;
        u8 set_fc_aq_fail = 0;
        i40e_status ret;
        struct i40e_hw *hw = &pf->hw;
        u8 set_fc_aq_fail = 0;
        i40e_status ret;
-       u32 v;
+       u32 val;
+       int v;
 
        /* Now we wait for GRST to settle out.
         * We don't have to delete the VEBs or VSIs from the hw switch
 
        /* Now we wait for GRST to settle out.
         * We don't have to delete the VEBs or VSIs from the hw switch
@@ -7037,12 +6978,13 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        if (ret)
                goto end_core_reset;
 
        if (ret)
                goto end_core_reset;
 
-       /* driver is only interested in link up/down and module qualification
-        * reports from firmware
+       /* The driver only wants link up/down and module qualification
+        * reports from firmware.  Note the negative logic.
         */
        ret = i40e_aq_set_phy_int_mask(&pf->hw,
         */
        ret = i40e_aq_set_phy_int_mask(&pf->hw,
-                                      I40E_AQ_EVENT_LINK_UPDOWN |
-                                      I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+                                      ~(I40E_AQ_EVENT_LINK_UPDOWN |
+                                        I40E_AQ_EVENT_MEDIA_NA |
+                                        I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
        if (ret)
                dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
        if (ret)
                dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
@@ -7109,8 +7051,21 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                }
        }
 
                }
        }
 
-       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
-           (pf->hw.aq.fw_maj_ver < 4)) {
+       /* Reconfigure hardware for allowing smaller MSS in the case
+        * of TSO, so that we avoid the MDD being fired and causing
+        * a reset in the case of small MSS+TSO.
+        */
+#define I40E_REG_MSS          0x000E64DC
+#define I40E_REG_MSS_MIN_MASK 0x3FF0000
+#define I40E_64BYTE_MSS       0x400000
+       val = rd32(hw, I40E_REG_MSS);
+       if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+               val &= ~I40E_REG_MSS_MIN_MASK;
+               val |= I40E_64BYTE_MSS;
+               wr32(hw, I40E_REG_MSS, val);
+       }
+
+       if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
                msleep(75);
                ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (ret)
                msleep(75);
                ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (ret)
@@ -7268,30 +7223,31 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
        i40e_flush(hw);
 }
 
        i40e_flush(hw);
 }
 
-#ifdef HAVE_VXLAN_RX_OFFLOAD
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
 /**
 /**
- * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
+ * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
  * @pf: board private structure
  **/
  * @pf: board private structure
  **/
-static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
        __be16 port;
        int i;
 
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
        __be16 port;
        int i;
 
-       if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+       if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
                return;
 
                return;
 
-       pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+       pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
 
        for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
 
        for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-               if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
-                       pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
-                       port = pf->vxlan_ports[i];
+               if (pf->pending_udp_bitmap & BIT_ULL(i)) {
+                       pf->pending_udp_bitmap &= ~BIT_ULL(i);
+                       port = pf->udp_ports[i].index;
                        if (port)
                                ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
                        if (port)
                                ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
-                                                    I40E_AQC_TUNNEL_TYPE_VXLAN,
+                                                    pf->udp_ports[i].type,
                                                     NULL, NULL);
                        else
                                ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
                                                     NULL, NULL);
                        else
                                ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -7304,18 +7260,23 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
                                         i40e_stat_str(&pf->hw, ret),
                                         i40e_aq_str(&pf->hw,
                                                    pf->hw.aq.asq_last_status));
                                         i40e_stat_str(&pf->hw, ret),
                                         i40e_aq_str(&pf->hw,
                                                    pf->hw.aq.asq_last_status));
-                               pf->vxlan_ports[i] = 0;
+                               pf->udp_ports[i].index = 0;
                        } else {
                        } else {
-                               dev_info(&pf->pdev->dev,
-                                        "%s vxlan port %d, index %d success\n",
-                                        port ? "add" : "delete",
-                                        ntohs(port), i);
+                               if (port)
+                                       dev_dbg(&pf->pdev->dev,
+                                               "add vxlan port %d, index %d success\n",
+                                               ntohs(port), i);
+                               else
+                                       dev_dbg(&pf->pdev->dev,
+                                               "delete vxlan port success\n");
+
                        }
                }
        }
 }
 
                        }
                }
        }
 }
 
-#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#endif /* CONFIG_GENEVE */
+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_GENEVE_RX_OFFLOAD */
 
 /**
  * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 
 /**
  * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
@@ -7332,7 +7293,7 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 {
        struct i40e_ring *tx_ring = NULL;
        struct i40e_pf  *pf;
 {
        struct i40e_ring *tx_ring = NULL;
        struct i40e_pf  *pf;
-       u32 head, val, tx_pending;
+       u32 val, tx_pending_hw;
        int i;
 
        pf = vsi->back;
        int i;
 
        pf = vsi->back;
@@ -7358,17 +7319,26 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
        else
                val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 
        else
                val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 
-       head = i40e_get_head(tx_ring);
-
-       tx_pending = i40e_get_tx_pending(tx_ring);
+       tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
 
        /* Interrupts are disabled and TX pending is non-zero,
         * trigger the SW interrupt (don't wait). Worst case
         * there will be one extra interrupt which may result
         * into not cleaning any queues because queues are cleaned.
         */
 
        /* Interrupts are disabled and TX pending is non-zero,
         * trigger the SW interrupt (don't wait). Worst case
         * there will be one extra interrupt which may result
         * into not cleaning any queues because queues are cleaned.
         */
-       if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+       if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
                i40e_force_wb(vsi, tx_ring->q_vector);
                i40e_force_wb(vsi, tx_ring->q_vector);
+
+       /* This is the case where we have interrupts missing,
+        * so the tx_pending in HW will most likely be 0, but we
+        * will have tx_pending in sw since the WB happened but the
+        * interrupt got lost.
+        */
+       if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true)
+          && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
+               if (napi_reschedule(&tx_ring->q_vector->napi))
+                       tx_ring->tx_stats.tx_lost_interrupt++;
+       }
 }
 
 /**
 }
 
 /**
@@ -7383,7 +7353,7 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf)
 {
        struct net_device *netdev;
        struct i40e_vsi *vsi;
 {
        struct net_device *netdev;
        struct i40e_vsi *vsi;
-       int i;
+       unsigned int i;
 
        /* Only for LAN VSI */
        vsi = pf->vsi[pf->lan_vsi];
 
        /* Only for LAN VSI */
        vsi = pf->vsi[pf->lan_vsi];
@@ -7443,10 +7413,12 @@ static void i40e_service_task(struct work_struct *work)
        i40e_vc_process_vflr_event(pf);
        i40e_watchdog_subtask(pf);
        i40e_fdir_reinit_subtask(pf);
        i40e_vc_process_vflr_event(pf);
        i40e_watchdog_subtask(pf);
        i40e_fdir_reinit_subtask(pf);
-#ifdef HAVE_VXLAN_RX_OFFLOAD
-       i40e_sync_vxlan_filters_subtask(pf);
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
+       i40e_sync_udp_filters_subtask(pf);
 
 
-#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#endif
+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_GENEVE_RX_OFFLOAD */
 
        i40e_clean_adminq_subtask(pf);
 
 
        i40e_clean_adminq_subtask(pf);
 
@@ -7555,7 +7527,7 @@ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
 
        if (alloc_qvectors) {
                /* allocate memory for q_vector pointers */
 
        if (alloc_qvectors) {
                /* allocate memory for q_vector pointers */
-               size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+               size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
                vsi->q_vectors = kzalloc(size, GFP_KERNEL);
                if (!vsi->q_vectors) {
                        ret = -ENOMEM;
                vsi->q_vectors = kzalloc(size, GFP_KERNEL);
                if (!vsi->q_vectors) {
                        ret = -ENOMEM;
@@ -7714,6 +7686,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
        i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
        i40e_vsi_free_arrays(vsi, true);
        i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
        i40e_vsi_free_arrays(vsi, true);
+       i40e_clear_rss_config_user(vsi);
 
        pf->vsi[vsi->idx] = NULL;
        if (vsi->idx < pf->next_vsi)
 
        pf->vsi[vsi->idx] = NULL;
        if (vsi->idx < pf->next_vsi)
@@ -7770,6 +7743,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                tx_ring->count = vsi->num_desc;
                tx_ring->size = 0;
                tx_ring->dcb_tc = 0;
                tx_ring->count = vsi->num_desc;
                tx_ring->size = 0;
                tx_ring->dcb_tc = 0;
+
+               if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+                       tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
                vsi->tx_rings[i] = tx_ring;
 
                rx_ring = &tx_ring[1];
                vsi->tx_rings[i] = tx_ring;
 
                rx_ring = &tx_ring[1];
@@ -7782,10 +7758,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                rx_ring->count = vsi->num_desc;
                rx_ring->size = 0;
                rx_ring->dcb_tc = 0;
                rx_ring->count = vsi->num_desc;
                rx_ring->size = 0;
                rx_ring->dcb_tc = 0;
-               if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
-                       set_ring_16byte_desc_enabled(rx_ring);
-               else
-                       clear_ring_16byte_desc_enabled(rx_ring);
                vsi->rx_rings[i] = rx_ring;
        }
 
                vsi->rx_rings[i] = rx_ring;
        }
 
@@ -8046,11 +8018,9 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 #ifdef HAVE_IRQ_AFFINITY_HINT
        cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
 #endif
 #ifdef HAVE_IRQ_AFFINITY_HINT
        cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
 #endif
-       if (vsi->netdev) {
+       if (vsi->netdev)
                netif_napi_add(vsi->netdev, &q_vector->napi,
                               i40e_napi_poll, NAPI_POLL_WEIGHT);
                netif_napi_add(vsi->netdev, &q_vector->napi,
                               i40e_napi_poll, NAPI_POLL_WEIGHT);
-               napi_hash_add(&q_vector->napi);
-       }
 
        q_vector->rx.latency_range = I40E_LOW_LATENCY;
        q_vector->tx.latency_range = I40E_LOW_LATENCY;
 
        q_vector->rx.latency_range = I40E_LOW_LATENCY;
        q_vector->tx.latency_range = I40E_LOW_LATENCY;
@@ -8199,38 +8169,192 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
 
        i40e_flush(hw);
 
 
        i40e_flush(hw);
 
-       i40e_irq_dynamic_enable_icr0(pf);
+       i40e_irq_dynamic_enable_icr0(pf, true);
 
        return err;
 }
 
 /**
 
        return err;
 }
 
 /**
- * i40e_config_rss_reg - Prepare for RSS if used
- * @pf: board private structure
+ * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
+ * @vsi: Pointer to vsi structure
  * @seed: RSS hash seed
  * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Return 0 on success, negative on failure
  **/
  **/
-static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+                             u8 *lut, u16 lut_size)
 {
 {
-       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hw *hw = &pf->hw;
-       u32 *seed_dw = (u32 *)seed;
-       u32 current_queue = 0;
-       u32 lut = 0;
-       int i, j;
+       int ret = 0;
+
+       if (seed) {
+               struct i40e_aqc_get_set_rss_key_data *seed_dw =
+                       (struct i40e_aqc_get_set_rss_key_data *)seed;
+               ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Cannot set RSS key, err %s aq_err %s\n",
+                                i40e_stat_str(hw, ret),
+                                i40e_aq_str(hw, hw->aq.asq_last_status));
+                       return ret;
+               }
+       }
+       if (lut) {
+               bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
+
+               ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Cannot set RSS lut, err %s aq_err %s\n",
+                                i40e_stat_str(hw, ret),
+                                i40e_aq_str(hw, hw->aq.asq_last_status));
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+/**
+ * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
+ * @vsi: Pointer to vsi structure
+ * @seed: Buffter to store the hash keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Return 0 on success, negative on failure
+ */
+static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+                          u8 *lut, u16 lut_size)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       int ret = 0;
+
+       if (seed) {
+               ret = i40e_aq_get_rss_key(hw, vsi->id,
+                       (struct i40e_aqc_get_set_rss_key_data *)seed);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Cannot get RSS key, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+                       return ret;
+               }
+       }
+
+       if (lut) {
+               bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
+
+               ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Cannot get RSS lut, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+       u8 seed[I40E_HKEY_ARRAY_SIZE];
+       struct i40e_pf *pf = vsi->back;
+       u8 *lut;
+       int ret;
+
+       if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
+               return 0;
+
+       lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+       if (!lut)
+               return -ENOMEM;
+
+       if (!vsi->rss_size)
+               vsi->rss_size = min_t(int, pf->alloc_rss_size,
+                                     vsi->num_queue_pairs);
+
+       /* Use the user configured hash keys and lookup table if there is one,
+        * otherwise use default
+        */
+       if (vsi->rss_lut_user)
+               memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+       else
+               i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+       if (vsi->rss_hkey_user)
+               memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+       else
+               netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+       ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
+       kfree(lut);
+
+       return ret;
+}
+
+/**
+ * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
+ * @vsi: Pointer to vsi structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
+                              const u8 *lut, u16 lut_size)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u16 vf_id = vsi->vf_id;
+       u8 i;
 
        /* Fill out hash function seed */
 
        /* Fill out hash function seed */
-       for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-               wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+       if (seed) {
+               u32 *seed_dw = (u32 *)seed;
+               if (vsi->type == I40E_VSI_MAIN) {
+                       for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+                               i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
+                                                 seed_dw[i]);
+               } else if (vsi->type == I40E_VSI_SRIOV) {
+                       for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
+                               i40e_write_rx_ctl(hw,
+                                                 I40E_VFQF_HKEY1(i, vf_id),
+                                                 seed_dw[i]);
+               } else {
+                       dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
+               }
+       }
 
 
-       for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
-               lut = 0;
-               for (j = 0; j < 4; j++) {
-                       if (current_queue == vsi->rss_size)
-                               current_queue = 0;
-                       lut |= ((current_queue) << (8 * j));
-                       current_queue++;
+       if (lut) {
+               u32 *lut_dw = (u32 *)lut;
+
+               if (vsi->type == I40E_VSI_MAIN) {
+                       if (lut_size != I40E_HLUT_ARRAY_SIZE)
+                               return -EINVAL;
+                       for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+                               wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+               } else if (vsi->type == I40E_VSI_SRIOV) {
+                       if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
+                               return -EINVAL;
+                       for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+                               i40e_write_rx_ctl(hw,
+                                                 I40E_VFQF_HLUT1(i, vf_id),
+                                                 lut_dw[i]);
+               } else {
+                       dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
                }
                }
-               wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
        }
        i40e_flush(hw);
 
        }
        i40e_flush(hw);
 
@@ -8238,37 +8362,166 @@ static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
 }
 
 /**
 }
 
 /**
- * i40e_config_rss - Prepare for RSS if used
+ * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
+                           u8 *lut, u16 lut_size)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u16 i;
+
+       if (seed) {
+               /* The hash key registers are read one u32 at a time;
+                * the caller's u8 buffer is accessed as u32, so it is
+                * assumed to be u32-aligned and at least
+                * (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4 bytes.
+                * NOTE(review): no size check is done on seed, unlike
+                * the lut_size check below -- confirm callers.
+                */
+               u32 *seed_dw = (u32 *)seed;
+
+               for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+                       seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
+       }
+       if (lut) {
+               u32 *lut_dw = (u32 *)lut;
+
+               /* Reject buffers that do not match the PF LUT size */
+               if (lut_size != I40E_HLUT_ARRAY_SIZE)
+                       return -EINVAL;
+               for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+                       lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_config_rss - Configure RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ */
+int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+       struct i40e_pf *pf = vsi->back;
+
+       /* Parts that advertise I40E_FLAG_RSS_AQ_CAPABLE (set for X722
+        * earlier in this patch) program RSS via the admin queue;
+        * everything else falls back to direct register writes.
+        */
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+               return i40e_config_rss_aq(vsi, seed, lut, lut_size);
+       else
+               return i40e_config_rss_reg(vsi, seed, lut, lut_size);
+}
+
+/**
+ * i40e_get_rss - Get RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Returns 0 on success, negative on failure
+ */
+int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+       struct i40e_pf *pf = vsi->back;
+
+       /* Read back through the same path used to configure: admin
+        * queue on AQ-capable parts, registers otherwise.
+        */
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+               return i40e_get_rss_aq(vsi, seed, lut, lut_size);
+       else
+               return i40e_get_rss_reg(vsi, seed, lut, lut_size);
+}
+
+/**
+ * i40e_fill_rss_lut - Fill the RSS lookup table with default values
+ * @pf: Pointer to board private structure
+ * @lut: Lookup table
+ * @rss_table_size: Lookup table size
+ * @rss_size: Range of queue number for hashing
+ */
+static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+                             u16 rss_table_size, u16 rss_size)
+{
+       u16 i;
+
+       /* Default LUT: spread the hash buckets round-robin over the
+        * first rss_size queues (0, 1, ..., rss_size-1, 0, 1, ...).
+        * NOTE(review): behavior is undefined if rss_size == 0
+        * (division by zero) -- callers derive it via min_t() against
+        * num_queue_pairs, presumably nonzero; confirm.
+        */
+       for (i = 0; i < rss_table_size; i++) {
+               lut[i] = i % rss_size;
+       }
+}
+
+/**
+ * i40e_pf_config_rss - Prepare for RSS if used
  * @pf: board private structure
  **/
  * @pf: board private structure
  **/
-static int i40e_config_rss(struct i40e_pf *pf)
+static int i40e_pf_config_rss(struct i40e_pf *pf)
 {
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        u8 seed[I40E_HKEY_ARRAY_SIZE];
 {
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        u8 seed[I40E_HKEY_ARRAY_SIZE];
+       u8 *lut;
        struct i40e_hw *hw = &pf->hw;
        u32 reg_val;
        u64 hena;
        struct i40e_hw *hw = &pf->hw;
        u32 reg_val;
        u64 hena;
-
-       netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+       int ret;
 
        /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
 
        /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
-       hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
-               ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+       hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+               ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
        hena |= i40e_pf_get_default_rss_hena(pf);
 
        hena |= i40e_pf_get_default_rss_hena(pf);
 
-       wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
-       wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
-
-       vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+       i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+       i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
        /* Determine the RSS table size based on the hardware capabilities */
 
        /* Determine the RSS table size based on the hardware capabilities */
-       reg_val = rd32(hw, I40E_PFQF_CTL_0);
+       reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
        reg_val = (pf->rss_table_size == 512) ?
                        (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
                        (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
        reg_val = (pf->rss_table_size == 512) ?
                        (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
                        (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
-       wr32(hw, I40E_PFQF_CTL_0, reg_val);
+       i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
+
+       /* Determine the RSS size of the VSI */
+       if (!vsi->rss_size)
+               vsi->rss_size = min_t(int, pf->alloc_rss_size,
+                                     vsi->num_queue_pairs);
+
+       lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+       if (!lut)
+               return -ENOMEM;
+
+       /* Use user configured lut if there is one, otherwise use default */
+       if (vsi->rss_lut_user)
+               memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+       else
+               i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
 
 
-       return i40e_config_rss_reg(pf, seed);
+       /* Use user configured hash key if there is one, otherwise
+        * use default.
+        */
+       if (vsi->rss_hkey_user)
+               memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+       else
+               netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+       ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+       kfree(lut);
+
+       return ret;
+}
+
+/**
+ * i40e_clear_rss_config_user - clear the user configured RSS hash keys
+ * and lookup table
+ * @vsi: Pointer to VSI structure
+ */
+static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
+{
+       if (!vsi)
+               return;
+
+       /* kfree(NULL) is a no-op, so no need to check the pointers;
+        * NULL them afterwards so i40e_pf_config_rss() falls back to
+        * the default seed/LUT instead of using freed memory.
+        */
+       kfree(vsi->rss_hkey_user);
+       vsi->rss_hkey_user = NULL;
+
+       kfree(vsi->rss_lut_user);
+       vsi->rss_lut_user = NULL;
+}
 
 /**
 }
 
 /**
@@ -8293,13 +8546,28 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
                vsi->req_queue_pairs = queue_count;
                i40e_prep_for_reset(pf);
 
                vsi->req_queue_pairs = queue_count;
                i40e_prep_for_reset(pf);
 
-               pf->rss_size = new_rss_size;
+               pf->alloc_rss_size = new_rss_size;
 
                i40e_reset_and_rebuild(pf, true);
 
                i40e_reset_and_rebuild(pf, true);
-               i40e_config_rss(pf);
+
+               /* Discard the user configured hash keys and lut, if less
+                * queues are enabled.
+                */
+               if (queue_count < vsi->rss_size) {
+                       i40e_clear_rss_config_user(vsi);
+                       dev_dbg(&pf->pdev->dev,
+                               "discard user configured hash keys and lut\n");
+               }
+
+               /* Reset vsi->rss_size, as number of enabled queues changed */
+               vsi->rss_size = min_t(int, pf->alloc_rss_size,
+                                     vsi->num_queue_pairs);
+
+               i40e_pf_config_rss(pf);
        }
        }
-       dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
-       return pf->rss_size;
+       dev_info(&pf->pdev->dev, "RSS count/HW max RSS count:  %d/%d\n",
+                pf->alloc_rss_size, pf->rss_size_max);
+       return pf->alloc_rss_size;
 }
 
 /**
 }
 
 /**
@@ -8447,7 +8715,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
 
        pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
                                (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
 
        pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
                                (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
-       pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
        if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
                if (I40E_DEBUG_USER & debug)
                        pf->hw.debug_mask = debug;
        if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
                if (I40E_DEBUG_USER & debug)
                        pf->hw.debug_mask = debug;
@@ -8458,16 +8725,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
        /* Set default capability flags */
        pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
                    I40E_FLAG_MSI_ENABLED     |
        /* Set default capability flags */
        pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
                    I40E_FLAG_MSI_ENABLED     |
-                   I40E_FLAG_LINK_POLLING_ENABLED |
                    I40E_FLAG_MSIX_ENABLED;
 
                    I40E_FLAG_MSIX_ENABLED;
 
-#ifdef HAVE_IOMMU_PRESENT
-       if (iommu_present(&pci_bus_type))
-               pf->flags |= I40E_FLAG_RX_PS_ENABLED;
-       else
-#endif
-               pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
-
        /* Set default ITR */
        pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
        pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
        /* Set default ITR */
        pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
        pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
@@ -8475,14 +8734,15 @@ static int i40e_sw_init(struct i40e_pf *pf)
         * maximum might end up larger than the available queues
         */
        pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
         * maximum might end up larger than the available queues
         */
        pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
-       pf->rss_size = 1;
+       pf->alloc_rss_size = 1;
        pf->rss_table_size = pf->hw.func_caps.rss_table_size;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
                                 pf->hw.func_caps.num_tx_qp);
 
        if (pf->hw.func_caps.rss) {
                pf->flags |= I40E_FLAG_RSS_ENABLED;
        pf->rss_table_size = pf->hw.func_caps.rss_table_size;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
                                 pf->hw.func_caps.num_tx_qp);
 
        if (pf->hw.func_caps.rss) {
                pf->flags |= I40E_FLAG_RSS_ENABLED;
-               pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
+               pf->alloc_rss_size = min_t(int, pf->rss_size_max,
+                                          num_online_cpus());
        }
        /* MFP mode enabled */
        if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
        }
        /* MFP mode enabled */
        if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
@@ -8518,6 +8778,56 @@ static int i40e_sw_init(struct i40e_pf *pf)
                                 pf->hw.func_caps.fd_filters_best_effort;
        }
 
                                 pf->hw.func_caps.fd_filters_best_effort;
        }
 
+       if (pf->hw.mac.type == I40E_MAC_X722) {
+               pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE
+                            | I40E_FLAG_128_QP_RSS_CAPABLE
+                            | I40E_FLAG_HW_ATR_EVICT_CAPABLE
+                            | I40E_FLAG_WB_ON_ITR_CAPABLE
+                            | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE
+                            | I40E_FLAG_NO_PCI_LINK_CHECK
+                            | I40E_FLAG_100M_SGMII_CAPABLE
+                            | I40E_FLAG_USE_SET_LLDP_MIB
+                            | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+
+#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
+               if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
+                                               I40E_FDEVICT_PCTYPE_DEFAULT) {
+                       dev_warn(&pf->pdev->dev, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
+                       pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+               }
+       } else if ((pf->hw.aq.api_maj_ver > 1) ||
+                  ((pf->hw.aq.api_maj_ver == 1) &&
+                   (pf->hw.aq.api_min_ver > 4))) {
+               /* Supported in FW API version higher than 1.4 */
+               pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+               pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+
+               /* supports mpls header skip and csum for following headers */
+               pf->flags |= I40E_FLAG_MPLS_HDR_OFFLOAD_CAPABLE;
+       } else {
+               pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+       }
+
+       if (i40e_is_mac_710(&pf->hw) &&
+           (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4))) {
+               pf->flags |= I40E_FLAG_RESTART_AUTONEG;
+               /* No DCB support  for FW < v4.33 */
+               pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
+       }
+
+       /* Disable FW LLDP if FW < v4.3 */
+       if (i40e_is_mac_710(&pf->hw) &&
+           (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
+           (pf->hw.aq.fw_maj_ver < 4)))
+               pf->flags |= I40E_FLAG_STOP_FW_LLDP;
+
+       /* Use the FW Set LLDP MIB API if FW > v4.40 */
+       if (i40e_is_mac_710(&pf->hw) &&
+           (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
+           (pf->hw.aq.fw_maj_ver >= 5)))
+               pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
+
        if (pf->hw.func_caps.vmdq) {
                pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
                pf->flags |= I40E_FLAG_VMDQ_ENABLED;
        if (pf->hw.func_caps.vmdq) {
                pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
                pf->flags |= I40E_FLAG_VMDQ_ENABLED;
@@ -8662,26 +8972,31 @@ static int i40e_set_features(struct net_device *netdev,
 }
 
 #endif /* HAVE_NDO_SET_FEATURES */
 }
 
 #endif /* HAVE_NDO_SET_FEATURES */
-#ifdef HAVE_VXLAN_RX_OFFLOAD
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
 /**
 /**
- * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
+ * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
  * @pf: board private structure
  * @port: The UDP port to look up
  *
  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
  **/
  * @pf: board private structure
  * @port: The UDP port to look up
  *
  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
  **/
-static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
 {
        u8 i;
 
        for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
 {
        u8 i;
 
        for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-               if (pf->vxlan_ports[i] == port)
+               if (pf->udp_ports[i].index == port)
                        return i;
        }
 
        return i;
 }
 
                        return i;
        }
 
        return i;
 }
 
+#endif
+#endif /* HAVE_VXLAN_RX_OFFLOAD || HAVE_GENEVE_RX_OFFLOAD */
+#if defined(HAVE_VXLAN_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_VXLAN)
 /**
  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
  * @netdev: This physical port's netdev
 /**
  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
  * @netdev: This physical port's netdev
@@ -8697,10 +9012,10 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
        u8 next_idx;
        u8 idx;
 
        u8 next_idx;
        u8 idx;
 
-       if (sa_family == AF_INET6)
+       if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
                return;
 
                return;
 
-       idx = i40e_get_vxlan_port_idx(pf, port);
+       idx = i40e_get_udp_port_idx(pf, port);
 
        /* Check if port already exists */
        if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
 
        /* Check if port already exists */
        if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
@@ -8710,7 +9025,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
        }
 
        /* Now check if there is space to add the new port */
        }
 
        /* Now check if there is space to add the new port */
-       next_idx = i40e_get_vxlan_port_idx(pf, 0);
+       next_idx = i40e_get_udp_port_idx(pf, 0);
 
        if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
                netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
 
        if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
                netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
@@ -8719,9 +9034,10 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
        }
 
        /* New port: add it and mark its index in the bitmap */
        }
 
        /* New port: add it and mark its index in the bitmap */
-       pf->vxlan_ports[next_idx] = port;
-       pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
-       pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+       pf->udp_ports[next_idx].index = port;
+       pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+       pf->pending_udp_bitmap |= BIT_ULL(next_idx);
+       pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
 }
 
 /**
 }
 
 /**
@@ -8738,26 +9054,106 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
        struct i40e_pf *pf = vsi->back;
        u8 idx;
 
        struct i40e_pf *pf = vsi->back;
        u8 idx;
 
-       if (sa_family == AF_INET6)
+       if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
                return;
 
                return;
 
-       idx = i40e_get_vxlan_port_idx(pf, port);
+       idx = i40e_get_udp_port_idx(pf, port);
 
        /* Check if port already exists */
        if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
                /* if port exists, set it to 0 (mark for deletion)
                 * and make it pending
                 */
 
        /* Check if port already exists */
        if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
                /* if port exists, set it to 0 (mark for deletion)
                 * and make it pending
                 */
-               pf->vxlan_ports[idx] = 0;
-               pf->pending_vxlan_bitmap |= BIT_ULL(idx);
-               pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+               pf->udp_ports[idx].index = 0;
+               pf->pending_udp_bitmap |= BIT_ULL(idx);
+               pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
        } else {
                netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
                            ntohs(port));
        }
 }
 
        } else {
                netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
                            ntohs(port));
        }
 }
 
+#endif /* CONFIG_VXLAN */
 #endif /* HAVE_VXLAN_RX_OFFLOAD */
 #endif /* HAVE_VXLAN_RX_OFFLOAD */
+#if defined(HAVE_GENEVE_RX_OFFLOAD)
+#if IS_ENABLED(CONFIG_GENEVE)
+/**
+ * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that GENEVE is notifying us about
+ * @port: New UDP port number that GENEVE started listening to
+ **/
+static void i40e_add_geneve_port(struct net_device *netdev,
+                                sa_family_t sa_family, __be16 port)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       u8 next_idx;
+       u8 idx;
+
+       /* NOTE(review): no I40E_FLAG_GENEVE_OFFLOAD_CAPABLE check here,
+        * while i40e_add_vxlan_port() in this same patch bails out on
+        * that GENEVE flag. Upstream has the capability check in the
+        * geneve path, not the vxlan one -- the two guards look
+        * swapped; confirm against the 1.5.18 upstream sources.
+        * sa_family is accepted but ignored (both v4 and v6 handled).
+        */
+       idx = i40e_get_udp_port_idx(pf, port);
+
+       /* Check if port already exists */
+       if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+               netdev_info(netdev, "udp port %d already offloaded\n",
+                           ntohs(port));
+               return;
+       }
+
+       /* Now check if there is space to add the new port */
+       next_idx = i40e_get_udp_port_idx(pf, 0);
+
+       if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+               netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n",
+                           ntohs(port));
+               return;
+       }
+
+       /* New port: add it and mark its index in the bitmap; the
+        * actual HW programming is deferred to the service task via
+        * I40E_FLAG_UDP_FILTER_SYNC.
+        */
+       pf->udp_ports[next_idx].index = port;
+       pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
+       pf->pending_udp_bitmap |= BIT_ULL(next_idx);
+       pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+
+       dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
+}
+
+/**
+ * i40e_del_geneve_port - Get notifications about GENEVE ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that GENEVE is notifying us about
+ * @port: UDP port number that GENEVE stopped listening to
+ **/
+static void i40e_del_geneve_port(struct net_device *netdev,
+                                sa_family_t sa_family, __be16 port)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       u8 idx;
+
+       /* NOTE(review): like i40e_add_geneve_port(), no
+        * I40E_FLAG_GENEVE_OFFLOAD_CAPABLE guard here while the VXLAN
+        * del path checks that flag -- confirm the guards are not
+        * swapped. sa_family is ignored.
+        */
+       idx = i40e_get_udp_port_idx(pf, port);
+
+       /* Check if port already exists */
+       if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+               /* if port exists, set it to 0 (mark for deletion)
+                * and make it pending
+                */
+               pf->udp_ports[idx].index = 0;
+               pf->pending_udp_bitmap |= BIT_ULL(idx);
+               pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+
+               dev_info(&pf->pdev->dev, "deleting geneve port %d\n",
+                        ntohs(port));
+       } else {
+               netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
+                           ntohs(port));
+       }
+}
+
+#endif /* CONFIG_GENEVE */
+#endif /* HAVE_GENEVE_RX_OFFLOAD */
 #ifdef HAVE_NDO_GET_PHYS_PORT_ID
 static int i40e_get_phys_port_id(struct net_device *netdev,
                                 struct netdev_phys_item_id *ppid)
 #ifdef HAVE_NDO_GET_PHYS_PORT_ID
 static int i40e_get_phys_port_id(struct net_device *netdev,
                                 struct netdev_phys_item_id *ppid)
@@ -8826,7 +9222,10 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm,
 }
 
 #ifdef HAVE_NDO_FEATURES_CHECK
 }
 
 #ifdef HAVE_NDO_FEATURES_CHECK
-#define I40E_MAX_TUNNEL_HDR_LEN 80
+/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
+ * inner mac plus all inner ethertypes.
+ */
+#define I40E_MAX_TUNNEL_HDR_LEN 128
 /**
  * i40e_features_check - Validate encapsulated packet conforms to limits
  * @skb: skb buff
 /**
  * i40e_features_check - Validate encapsulated packet conforms to limits
  * @skb: skb buff
@@ -8837,43 +9236,16 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
                                             struct net_device *dev,
                                             netdev_features_t features)
 {
                                             struct net_device *dev,
                                             netdev_features_t features)
 {
-       u8 protocol = 0;
-
        if (!skb->encapsulation)
                return features;
 
        /* prevent tunnel headers that are too long to offload from
         * being sent to the hardware
         */
        if (!skb->encapsulation)
                return features;
 
        /* prevent tunnel headers that are too long to offload from
         * being sent to the hardware
         */
-       if (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+       if (skb_inner_network_header(skb) - skb_transport_header(skb) >
            I40E_MAX_TUNNEL_HDR_LEN)
            I40E_MAX_TUNNEL_HDR_LEN)
-               return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
-
-       /* this is a somewhat temporary patch to prevent the driver
-        * from trying to offload tunnels it cannot support
-        * currently the only supported tunnel is VxLAN,
-        * this code looks like vxlan_features_check but is not
-        * the same.
-        */
-
-       switch (vlan_get_protocol(skb)) {
-       case htons(ETH_P_IP):
-               protocol = ip_hdr(skb)->protocol;
-               break;
-       case htons(ETH_P_IPV6):
-               protocol = ipv6_hdr(skb)->nexthdr;
-               break;
-       default:
-               return features;;
-       }
+               return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 
 
-       if ((protocol != IPPROTO_UDP) ||
-           (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
-            skb->inner_protocol != htons(ETH_P_TEB) ||
-            (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-             sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
-               return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
-       
        return features;
 }
 
        return features;
 }
 
@@ -9070,7 +9442,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
                                       0, 0, nlflags);
 #elif defined(HAVE_NDO_FDB_ADD_VID) || \
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
                                       0, 0, nlflags);
 #elif defined(HAVE_NDO_FDB_ADD_VID) || \
-       defined NDO_BRIDGE_GETLINK_HAS_FILTER_MASK_PARAM
+       defined NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
                                       0, 0);
 #else
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
                                       0, 0);
 #else
@@ -9107,7 +9479,11 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_poll_controller    = i40e_netpoll,
 #endif
 #ifdef HAVE_SETUP_TC
        .ndo_poll_controller    = i40e_netpoll,
 #endif
 #ifdef HAVE_SETUP_TC
+#ifdef NETIF_F_HW_TC
+       .ndo_setup_tc           = __i40e_setup_tc,
+#else
        .ndo_setup_tc           = i40e_setup_tc,
        .ndo_setup_tc           = i40e_setup_tc,
+#endif
 #endif /* HAVE_SETUP_TC */
 #ifdef I40E_FCOE
        .ndo_fcoe_enable        = i40e_fcoe_enable,
 #endif /* HAVE_SETUP_TC */
 #ifdef I40E_FCOE
        .ndo_fcoe_enable        = i40e_fcoe_enable,
@@ -9122,17 +9498,25 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_set_vf_tx_rate     = i40e_ndo_set_vf_bw,
 #endif
        .ndo_get_vf_config      = i40e_ndo_get_vf_config,
        .ndo_set_vf_tx_rate     = i40e_ndo_set_vf_bw,
 #endif
        .ndo_get_vf_config      = i40e_ndo_get_vf_config,
-#ifdef HAVE_NDO_SET_VF_LINK_STATE
-       .ndo_set_vf_link_state  = i40e_ndo_set_vf_link_state,
-#endif
 #ifdef HAVE_VF_SPOOFCHK_CONFIGURE
        .ndo_set_vf_spoofchk    = i40e_ndo_set_vf_spoofchk,
 #endif
 #ifdef HAVE_VF_SPOOFCHK_CONFIGURE
        .ndo_set_vf_spoofchk    = i40e_ndo_set_vf_spoofchk,
 #endif
+#ifdef HAVE_NDO_SET_VF_TRUST
+       .ndo_set_vf_trust       = i40e_ndo_set_vf_trust,
+#endif
 #endif /* IFLA_VF_MAX */
 #ifdef HAVE_VXLAN_RX_OFFLOAD
 #endif /* IFLA_VF_MAX */
 #ifdef HAVE_VXLAN_RX_OFFLOAD
+#if IS_ENABLED(CONFIG_VXLAN)
        .ndo_add_vxlan_port     = i40e_add_vxlan_port,
        .ndo_del_vxlan_port     = i40e_del_vxlan_port,
        .ndo_add_vxlan_port     = i40e_add_vxlan_port,
        .ndo_del_vxlan_port     = i40e_del_vxlan_port,
+#endif
 #endif /* HAVE_VXLAN_RX_OFFLOAD */
 #endif /* HAVE_VXLAN_RX_OFFLOAD */
+#ifdef HAVE_GENEVE_RX_OFFLOAD
+#if IS_ENABLED(CONFIG_GENEVE)
+       .ndo_add_geneve_port    = i40e_add_geneve_port,
+       .ndo_del_geneve_port    = i40e_del_geneve_port,
+#endif
+#endif /* HAVE_GENEVE_RX_OFFLOAD */
 #ifdef HAVE_NDO_GET_PHYS_PORT_ID
        .ndo_get_phys_port_id   = i40e_get_phys_port_id,
 #endif /* HAVE_NDO_GET_PHYS_PORT_ID */
 #ifdef HAVE_NDO_GET_PHYS_PORT_ID
        .ndo_get_phys_port_id   = i40e_get_phys_port_id,
 #endif /* HAVE_NDO_GET_PHYS_PORT_ID */
@@ -9160,6 +9544,9 @@ static const struct net_device_ops_ext i40e_netdev_ops_ext = {
 #ifdef HAVE_NDO_SET_FEATURES
        .ndo_set_features       = i40e_set_features,
 #endif /* HAVE_NDO_SET_FEATURES */
 #ifdef HAVE_NDO_SET_FEATURES
        .ndo_set_features       = i40e_set_features,
 #endif /* HAVE_NDO_SET_FEATURES */
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+       .ndo_set_vf_link_state  = i40e_ndo_set_vf_link_state,
+#endif
 };
 
 #endif /* HAVE_NET_DEVICE_OPS */
 };
 
 #endif /* HAVE_NET_DEVICE_OPS */
@@ -9236,41 +9623,48 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        np = netdev_priv(netdev);
        np->vsi = vsi;
 #ifdef HAVE_ENCAP_CSUM_OFFLOAD
        np = netdev_priv(netdev);
        np->vsi = vsi;
 #ifdef HAVE_ENCAP_CSUM_OFFLOAD
-       netdev->hw_enc_features = NETIF_F_IP_CSUM        |
-                                 NETIF_F_SCTP_CSUM      |
+       netdev->hw_enc_features |= NETIF_F_IP_CSUM             |
+                                  NETIF_F_IPV6_CSUM           |
+                                  NETIF_F_TSO                 |
 #ifdef HAVE_ENCAP_TSO_OFFLOAD
 #ifdef HAVE_ENCAP_TSO_OFFLOAD
-                                 NETIF_F_GSO_UDP_TUNNEL |
-                                 NETIF_F_TSO            |
+                                  NETIF_F_TSO6                |
+                                  NETIF_F_TSO_ECN             |
+#ifdef HAVE_GRE_ENCAP_OFFLOAD
+                                  NETIF_F_GSO_GRE             |
+#endif
+                                  NETIF_F_GSO_UDP_TUNNEL      |
+                                  NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                                  0;
+#else /* HAVE_ENCAP_TSO_OFFLOAD */
+                                  NETIF_F_SG;
 #endif /* HAVE_ENCAP_TSO_OFFLOAD */
 #endif /* HAVE_ENCAP_TSO_OFFLOAD */
-                                 NETIF_F_SG;
 #endif /* HAVE_ENCAP_CSUM_OFFLOAD */
 
        netdev->features = NETIF_F_SG                  |
                           NETIF_F_IP_CSUM             |
 #endif /* HAVE_ENCAP_CSUM_OFFLOAD */
 
        netdev->features = NETIF_F_SG                  |
                           NETIF_F_IP_CSUM             |
-                          NETIF_F_SCTP_CSUM           |
+                          NETIF_F_SCTP_CRC            |
                           NETIF_F_HIGHDMA             |
 #ifdef HAVE_ENCAP_TSO_OFFLOAD
                           NETIF_F_GSO_UDP_TUNNEL      |
                           NETIF_F_HIGHDMA             |
 #ifdef HAVE_ENCAP_TSO_OFFLOAD
                           NETIF_F_GSO_UDP_TUNNEL      |
+#ifdef HAVE_GRE_ENCAP_OFFLOAD
+                          NETIF_F_GSO_GRE             |
+#endif
 #endif
 #ifdef NETIF_F_HW_VLAN_CTAG_RX
                           NETIF_F_HW_VLAN_CTAG_TX     |
                           NETIF_F_HW_VLAN_CTAG_RX     |
                           NETIF_F_HW_VLAN_CTAG_FILTER |
 #endif
 #ifdef NETIF_F_HW_VLAN_CTAG_RX
                           NETIF_F_HW_VLAN_CTAG_TX     |
                           NETIF_F_HW_VLAN_CTAG_RX     |
                           NETIF_F_HW_VLAN_CTAG_FILTER |
-#else
+#else /* NETIF_F_HW_VLAN_CTAG_RX */
                           NETIF_F_HW_VLAN_TX          |
                           NETIF_F_HW_VLAN_RX          |
                           NETIF_F_HW_VLAN_FILTER      |
                           NETIF_F_HW_VLAN_TX          |
                           NETIF_F_HW_VLAN_RX          |
                           NETIF_F_HW_VLAN_FILTER      |
-#endif
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
 #ifdef NETIF_F_IPV6_CSUM
                           NETIF_F_IPV6_CSUM           |
 #endif
 #ifdef NETIF_F_IPV6_CSUM
                           NETIF_F_IPV6_CSUM           |
 #endif
-#ifdef NETIF_F_TSO
                           NETIF_F_TSO                 |
                           NETIF_F_TSO_ECN             |
                           NETIF_F_TSO                 |
                           NETIF_F_TSO_ECN             |
-#ifdef NETIF_F_TSO6
                           NETIF_F_TSO6                |
                           NETIF_F_TSO6                |
-#endif /* NETIF_F_TSO6 */
-#endif /* NETIF_F_TSO */
 #ifdef HAVE_NDO_SET_FEATURES
                           NETIF_F_RXCSUM              |
 #endif
 #ifdef HAVE_NDO_SET_FEATURES
                           NETIF_F_RXCSUM              |
 #endif
@@ -9283,6 +9677,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
                netdev->features |= NETIF_F_NTUPLE;
 #endif
        if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
                netdev->features |= NETIF_F_NTUPLE;
 #endif
+       if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
+               netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
 #ifdef HAVE_NDO_SET_FEATURES
        /* copy netdev features into list of user selectable features */
 
 #ifdef HAVE_NDO_SET_FEATURES
        /* copy netdev features into list of user selectable features */
@@ -9346,6 +9742,12 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                                                     NETIF_F_HW_VLAN_RX);
 #endif
 #endif /* HAVE_NETDEV_VLAN_FEATURES */
                                                     NETIF_F_HW_VLAN_RX);
 #endif
 #endif /* HAVE_NETDEV_VLAN_FEATURES */
+
+#ifdef HAVE_MPLS_FEATURES
+       if (pf->flags & I40E_FLAG_MPLS_HDR_OFFLOAD_CAPABLE)
+               netdev->mpls_features =  NETIF_F_HW_CSUM;
+#endif
+
 #ifdef IFF_UNICAST_FLT
        netdev->priv_flags |= IFF_UNICAST_FLT;
 #endif
 #ifdef IFF_UNICAST_FLT
        netdev->priv_flags |= IFF_UNICAST_FLT;
 #endif
@@ -9569,7 +9971,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 
                ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
                ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
 
                ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
                ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
                if (pf->vf[vsi->vf_id].spoofchk) {
                        ctxt.info.valid_sections |=
                                cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
                if (pf->vf[vsi->vf_id].spoofchk) {
                        ctxt.info.valid_sections |=
                                cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
@@ -9577,7 +9978,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                                (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
                                 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
                }
                                (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
                                 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
                }
-#endif
                /* Setup the VSI tx/rx queue map for TC0 only for now */
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
                /* Setup the VSI tx/rx queue map for TC0 only for now */
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
@@ -9720,7 +10120,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                                f->is_vf, f->is_netdev);
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
                                f->is_vf, f->is_netdev);
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
-       i40e_sync_vsi_filters(vsi, false);
+       i40e_sync_vsi_filters(vsi);
 
        i40e_vsi_delete(vsi);
        i40e_vsi_free_q_vectors(vsi);
 
        i40e_vsi_delete(vsi);
        i40e_vsi_free_q_vectors(vsi);
@@ -9830,10 +10230,15 @@ vector_setup_out:
  **/
 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
 {
  **/
 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
 {
-       struct i40e_pf *pf = vsi->back;
+       struct i40e_pf *pf;
        u8 enabled_tc;
        int ret;
 
        u8 enabled_tc;
        int ret;
 
+       if (!vsi)
+               return NULL;
+
+       pf = vsi->back;
+
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_vsi_clear_rings(vsi);
 
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_vsi_clear_rings(vsi);
 
@@ -10051,6 +10456,11 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                /* no netdev or rings for the other VSI types */
                break;
        }
                /* no netdev or rings for the other VSI types */
                break;
        }
+
+       if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+           (vsi->type == I40E_VSI_VMDQ2)) {
+               ret = i40e_vsi_config_rss(vsi);
+       }
        return vsi;
 
 err_rings:
        return vsi;
 
 err_rings:
@@ -10288,13 +10698,13 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = veb->pf;
        bool is_default = pf->cur_promisc;
 {
        struct i40e_pf *pf = veb->pf;
        bool is_default = pf->cur_promisc;
-       bool is_cloud = false;
+       bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
        int ret;
 
        /* get a VEB from the hardware */
        ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
                              veb->enabled_tc, is_default,
        int ret;
 
        /* get a VEB from the hardware */
        ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
                              veb->enabled_tc, is_default,
-                             is_cloud, &veb->seid, NULL);
+                             &veb->seid, enable_stats, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "couldn't add VEB, err %s aq_err %s\n",
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "couldn't add VEB, err %s aq_err %s\n",
@@ -10563,6 +10973,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
  **/
 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 {
  **/
 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 {
+       u16 flags = 0;
        int ret;
 
        /* find out what's out there already */
        int ret;
 
        /* find out what's out there already */
@@ -10576,6 +10987,31 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
        }
        i40e_pf_reset_stats(pf);
 
        }
        i40e_pf_reset_stats(pf);
 
+       /* set the switch config bit for the whole device to
+        * support limited promisc or true promisc
+        * when user requests promisc. The default is limited
+        * promisc.
+        */
+       if ((pf->hw.pf_id == 0) &&
+           !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+               flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+
+       if (pf->hw.pf_id == 0) {
+               u16 valid_flags;
+
+               valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+               ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
+                                               NULL);
+               if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+                       dev_info(&pf->pdev->dev,
+                                "couldn't set switch config bits, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                pf->hw.aq.asq_last_status));
+                       /* not a fatal problem, just keep going */
+               }
+       }
+
        /* first time setup */
        if (pf->lan_vsi == I40E_NO_VSI || reinit) {
                struct i40e_vsi *vsi = NULL;
        /* first time setup */
        if (pf->lan_vsi == I40E_NO_VSI || reinit) {
                struct i40e_vsi *vsi = NULL;
@@ -10621,7 +11057,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
         * the hash
         */
        if ((pf->flags & I40E_FLAG_RSS_ENABLED))
         * the hash
         */
        if ((pf->flags & I40E_FLAG_RSS_ENABLED))
-               i40e_config_rss(pf);
+               i40e_pf_config_rss(pf);
 
        /* fill in link information and enable LSE reporting */
        i40e_update_link_info(&pf->hw);
 
        /* fill in link information and enable LSE reporting */
        i40e_update_link_info(&pf->hw);
@@ -10661,7 +11097,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
            !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
                /* one qp for PF, no queues for anything else */
                queues_left = 0;
            !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
                /* one qp for PF, no queues for anything else */
                queues_left = 0;
-               pf->rss_size = pf->num_lan_qps = 1;
+               pf->alloc_rss_size = pf->num_lan_qps = 1;
 
                /* make sure all the fancies are disabled */
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
 
                /* make sure all the fancies are disabled */
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
@@ -10678,7 +11114,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                                  I40E_FLAG_FD_ATR_ENABLED |
                                  I40E_FLAG_DCB_CAPABLE))) {
                /* one qp for PF */
                                  I40E_FLAG_FD_ATR_ENABLED |
                                  I40E_FLAG_DCB_CAPABLE))) {
                /* one qp for PF */
-               pf->rss_size = pf->num_lan_qps = 1;
+               pf->alloc_rss_size = pf->num_lan_qps = 1;
                queues_left -= pf->num_lan_qps;
 
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
                queues_left -= pf->num_lan_qps;
 
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
@@ -10748,8 +11184,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
                pf->hw.func_caps.num_tx_qp,
                !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
                "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
                pf->hw.func_caps.num_tx_qp,
                !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
-               pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
-               pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
+               pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
+               pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
+               queues_left);
 #ifdef I40E_FCOE
        dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
 #endif
 #ifdef I40E_FCOE
        dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
 #endif
@@ -10787,54 +11224,60 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
 }
 
 #define INFO_STRING_LEN 255
 }
 
 #define INFO_STRING_LEN 255
+#define REMAIN(__x) (INFO_STRING_LEN - (__x))
 static void i40e_print_features(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
 static void i40e_print_features(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
-       char *buf, *string;
+       char *buf;
+       int i;
 
 
-       string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
-       if (!string) {
-               dev_err(&pf->pdev->dev, "Features string allocation failed\n");
+       buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
+       if (!buf)
                return;
                return;
-       }
 
 
-       buf = string;
-
-       buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
+       i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
 #ifdef CONFIG_PCI_IOV
 #ifdef CONFIG_PCI_IOV
-       buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
+       i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
 #endif
 #endif
-       buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", pf->hw.func_caps.num_vsis,
-                      pf->vsi[pf->lan_vsi]->num_queue_pairs,
-                      pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
-
+       i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
+                     pf->hw.func_caps.num_vsis,
+                     pf->vsi[pf->lan_vsi]->num_queue_pairs);
        if (pf->flags & I40E_FLAG_RSS_ENABLED)
        if (pf->flags & I40E_FLAG_RSS_ENABLED)
-               buf += sprintf(buf, "RSS ");
+               i += snprintf(&buf[i], REMAIN(i), " RSS");
        if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
        if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
-               buf += sprintf(buf, "FD_ATR ");
+               i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
-               buf += sprintf(buf, "FD_SB ");
-               buf += sprintf(buf, "NTUPLE ");
+               i += snprintf(&buf[i], REMAIN(i), " FD_SB");
+               i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
        }
        }
+       i += snprintf(&buf[i], REMAIN(i), " CloudF");
        if (pf->flags & I40E_FLAG_DCB_CAPABLE)
        if (pf->flags & I40E_FLAG_DCB_CAPABLE)
-               buf += sprintf(buf, "DCB ");
-       buf += sprintf(buf, "VxLAN ");
+               i += snprintf(&buf[i], REMAIN(i), " DCB");
+#if IS_ENABLED(CONFIG_VXLAN)
+       i += snprintf(&buf[i], REMAIN(i), " VxLAN");
+#endif
+#if IS_ENABLED(CONFIG_GENEVE)
+       i += snprintf(&buf[i], REMAIN(i), " Geneve");
+#endif
+#ifdef HAVE_GRE_ENCAP_OFFLOAD
+       i += snprintf(&buf[i], REMAIN(i), " NVGRE");
+#endif
 #ifdef HAVE_PTP_1588_CLOCK
        if (pf->flags & I40E_FLAG_PTP)
 #ifdef HAVE_PTP_1588_CLOCK
        if (pf->flags & I40E_FLAG_PTP)
-               buf += sprintf(buf, "PTP ");
+               i += snprintf(&buf[i], REMAIN(i), " PTP");
 #endif
 #ifdef I40E_FCOE
        if (pf->flags & I40E_FLAG_FCOE_ENABLED)
 #endif
 #ifdef I40E_FCOE
        if (pf->flags & I40E_FLAG_FCOE_ENABLED)
-               buf += sprintf(buf, "FCOE ");
+               i += snprintf(&buf[i], REMAIN(i), " FCOE");
 #endif
        if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
 #endif
        if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
-               buf += sprintf(buf, "VEB ");
+               i += snprintf(&buf[i], REMAIN(i), " VEB");
        else
        else
-               buf += sprintf(buf, "VEPA ");
+               i += snprintf(&buf[i], REMAIN(i), " VEPA");
 
 
-       BUG_ON(buf > (string + INFO_STRING_LEN));
-       dev_info(&pf->pdev->dev, "%s\n", string);
-       kfree(string);
+       dev_info(&pf->pdev->dev, "%s\n", buf);
+       kfree(buf);
+       WARN_ON(i > INFO_STRING_LEN);
 }
 
 /**
 }
 
 /**
@@ -10862,7 +11305,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        u16 wol_nvm_bits;
        u16 link_status;
        int err = 0;
        u16 wol_nvm_bits;
        u16 link_status;
        int err = 0;
-       u32 len;
+       u32 val;
        u32 i;
        u8 set_fc_aq_fail;
 
        u32 i;
        u8 set_fc_aq_fail;
 
@@ -10930,6 +11373,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->bus.func = PCI_FUNC(pdev->devfn);
        pf->instance = pfs_found;
 
        hw->bus.func = PCI_FUNC(pdev->devfn);
        pf->instance = pfs_found;
 
+       /* set up the spinlocks for the AQ, do this only once in probe
+        * and destroy them only once in remove
+        */
+       i40e_init_spinlock_d(&hw->aq.asq_spinlock);
+       i40e_init_spinlock_d(&hw->aq.arq_spinlock);
+
        if (debug != -1)
                pf->msg_enable = pf->hw.debug_mask = debug;
 
        if (debug != -1)
                pf->msg_enable = pf->hw.debug_mask = debug;
 
@@ -10963,6 +11412,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pf->hw.fc.requested_mode = I40E_FC_NONE;
 
        err = i40e_init_adminq(hw);
        pf->hw.fc.requested_mode = I40E_FC_NONE;
 
        err = i40e_init_adminq(hw);
+       if (err) {
+               if (err == I40E_ERR_FIRMWARE_API_VERSION)
+                       dev_info(&pdev->dev,
+                                "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+               else
+                       dev_info(&pdev->dev,
+                                "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
+
+               goto err_pf_reset;
+       }
 
        /* provide nvm, fw, api versions */
        dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
 
        /* provide nvm, fw, api versions */
        dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
@@ -10970,12 +11429,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                 hw->aq.api_maj_ver, hw->aq.api_min_ver,
                 i40e_nvm_version_str(hw));
 
                 hw->aq.api_maj_ver, hw->aq.api_min_ver,
                 i40e_nvm_version_str(hw));
 
-       if (err) {
-               dev_info(&pdev->dev,
-                        "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
-               goto err_pf_reset;
-       }
-
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
                dev_info(&pdev->dev,
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
                dev_info(&pdev->dev,
@@ -11021,8 +11474,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * Ignore error return codes because if it was already disabled via
         * hardware settings this will fail
         */
         * Ignore error return codes because if it was already disabled via
         * hardware settings this will fail
         */
-       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
-           (pf->hw.aq.fw_maj_ver < 4)) {
+       if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
                dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
                i40e_aq_stop_lldp(hw, true, NULL);
        }
                dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
                i40e_aq_stop_lldp(hw, true, NULL);
        }
@@ -11074,7 +11526,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if (BIT_ULL(hw->port) & wol_nvm_bits || hw->partition_id != 1)
+       if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
                pf->wol_en = false;
        else
                pf->wol_en = true;
                pf->wol_en = false;
        else
                pf->wol_en = true;
@@ -11097,8 +11549,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
 
        /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
                pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
 
        /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
-       len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
-       pf->vsi = kzalloc(len, GFP_KERNEL);
+       pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
+                         GFP_KERNEL);
        if (!pf->vsi) {
                err = -ENOMEM;
                goto err_switch_setup;
        if (!pf->vsi) {
                err = -ENOMEM;
                goto err_switch_setup;
@@ -11149,19 +11601,30 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
                }
        }
 
-       /* driver is only interested in link up/down and module qualification
-        * reports from firmware
+       /* The driver only wants link up/down and module qualification
+        * reports from firmware.  Note the negative logic.
         */
        err = i40e_aq_set_phy_int_mask(&pf->hw,
         */
        err = i40e_aq_set_phy_int_mask(&pf->hw,
-                                      I40E_AQ_EVENT_LINK_UPDOWN |
-                                      I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+                                      ~(I40E_AQ_EVENT_LINK_UPDOWN |
+                                        I40E_AQ_EVENT_MEDIA_NA |
+                                        I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
        if (err)
                dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, err),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        if (err)
                dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, err),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
-       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
-           (pf->hw.aq.fw_maj_ver < 4)) {
+       /* Reconfigure hardware for allowing smaller MSS in the case
+        * of TSO, so that we avoid the MDD being fired and causing
+        * a reset in the case of small MSS+TSO.
+        */
+       val = rd32(hw, I40E_REG_MSS);
+       if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+               val &= ~I40E_REG_MSS_MIN_MASK;
+               val |= I40E_64BYTE_MSS;
+               wr32(hw, I40E_REG_MSS, val);
+       }
+
+       if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
                msleep(75);
                err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (err)
                msleep(75);
                err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (err)
@@ -11195,8 +11658,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
            (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
            !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
        if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
            (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
            !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
-               u32 val;
-
                /* disable link interrupts for VFs */
                val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
                val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
                /* disable link interrupts for VFs */
                val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
                val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
@@ -11314,6 +11775,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
                                                        pf->main_vsi_seid);
 
        i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
                                                        pf->main_vsi_seid);
 
+       if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
+           (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+               pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
+
        /* print a string summarizing features */
        i40e_print_features(pf);
 
        /* print a string summarizing features */
        i40e_print_features(pf);
 
@@ -11334,7 +11799,6 @@ err_init_lan_hmc:
        kfree(pf->qp_pile);
 err_sw_init:
 err_adminq_setup:
        kfree(pf->qp_pile);
 err_sw_init:
 err_adminq_setup:
-       (void)i40e_shutdown_adminq(hw);
 err_pf_reset:
        dev_warn(&pdev->dev, "previous errors forcing module to load in debug mode\n");
        i40e_dbg_pf_init(pf);
 err_pf_reset:
        dev_warn(&pdev->dev, "previous errors forcing module to load in debug mode\n");
        i40e_dbg_pf_init(pf);
@@ -11368,28 +11832,29 @@ static void i40e_remove(struct pci_dev *pdev)
 #endif
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
 #endif
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
-#ifdef HAVE_PTP_1588_CLOCK
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hw *hw = &pf->hw;
-#endif /* HAVE_PTP_1588_CLOCK */
        i40e_status ret_code;
        int i;
 
        i40e_dbg_pf_exit(pf);
        i40e_status ret_code;
        int i;
 
        i40e_dbg_pf_exit(pf);
-       if (test_bit(__I40E_DEBUG_MODE, &pf->state))
-               goto unmap;
-
 #ifdef HAVE_PTP_1588_CLOCK
        i40e_ptp_stop(pf);
 
        /* Disable RSS in hw */
 #ifdef HAVE_PTP_1588_CLOCK
        i40e_ptp_stop(pf);
 
        /* Disable RSS in hw */
-       wr32(hw, I40E_PFQF_HENA(0), 0);
-       wr32(hw, I40E_PFQF_HENA(1), 0);
+       i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
+       i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
 
 #endif /* HAVE_PTP_1588_CLOCK */
        /* no more scheduling of any task */
 
 #endif /* HAVE_PTP_1588_CLOCK */
        /* no more scheduling of any task */
+       set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
-       del_timer_sync(&pf->service_timer);
-       cancel_work_sync(&pf->service_task);
+       if (pf->service_timer.data)
+               del_timer_sync(&pf->service_timer);
+       if (pf->service_task.func)
+               cancel_work_sync(&pf->service_task);
+
+       if (test_bit(__I40E_DEBUG_MODE, &pf->state))
+               goto unmap;
 
        if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
                i40e_free_vfs(pf);
 
        if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
                i40e_free_vfs(pf);
@@ -11417,21 +11882,26 @@ static void i40e_remove(struct pci_dev *pdev)
                i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
        /* shutdown and destroy the HMC */
                i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
        /* shutdown and destroy the HMC */
-       if (pf->hw.hmc.hmc_obj) {
-               ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+       if (hw->hmc.hmc_obj) {
+               ret_code = i40e_shutdown_lan_hmc(hw);
                if (ret_code)
                        dev_warn(&pdev->dev,
                                 "Failed to destroy the HMC resources: %d\n",
                                 ret_code);
        }
 
                if (ret_code)
                        dev_warn(&pdev->dev,
                                 "Failed to destroy the HMC resources: %d\n",
                                 ret_code);
        }
 
+unmap:
        /* shutdown the adminq */
        /* shutdown the adminq */
-       ret_code = i40e_shutdown_adminq(&pf->hw);
+       ret_code = i40e_shutdown_adminq(hw);
        if (ret_code)
                dev_warn(&pdev->dev,
                         "Failed to destroy the Admin Queue resources: %d\n",
                         ret_code);
 
        if (ret_code)
                dev_warn(&pdev->dev,
                         "Failed to destroy the Admin Queue resources: %d\n",
                         ret_code);
 
+       /* destroy the locks only once, here */
+       i40e_destroy_spinlock_d(&hw->aq.arq_spinlock);
+       i40e_destroy_spinlock_d(&hw->aq.asq_spinlock);
+
        /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
        i40e_clear_interrupt_scheme(pf);
        for (i = 0; i < pf->num_alloc_vsi; i++) {
        /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
        i40e_clear_interrupt_scheme(pf);
        for (i = 0; i < pf->num_alloc_vsi; i++) {
@@ -11450,8 +11920,7 @@ static void i40e_remove(struct pci_dev *pdev)
        kfree(pf->qp_pile);
        kfree(pf->vsi);
 
        kfree(pf->qp_pile);
        kfree(pf->vsi);
 
-unmap:
-       iounmap(pf->hw.hw_addr);
+       iounmap(hw->hw_addr);
        kfree(pf);
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));
        kfree(pf);
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));
@@ -11549,7 +12018,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
 
        rtnl_lock();
        i40e_handle_reset_warning(pf);
 
        rtnl_lock();
        i40e_handle_reset_warning(pf);
-       rtnl_lock();
+       rtnl_unlock();
 }
 
 /**
 }
 
 /**
@@ -11564,23 +12033,25 @@ static void i40e_shutdown(struct pci_dev *pdev)
        set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
 
        set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
 
-       if (!test_bit(__I40E_DEBUG_MODE, &pf->state)) {
-               del_timer_sync(&pf->service_timer);
-               cancel_work_sync(&pf->service_task);
-               i40e_fdir_teardown(pf);
+       if (test_bit(__I40E_DEBUG_MODE, &pf->state))
+               goto debug_mode;
 
 
-               rtnl_lock();
-               i40e_prep_for_reset(pf);
-               rtnl_unlock();
+       del_timer_sync(&pf->service_timer);
+       cancel_work_sync(&pf->service_task);
+       i40e_fdir_teardown(pf);
 
 
-               wr32(hw, I40E_PFPM_APM,
-                    (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
-               wr32(hw, I40E_PFPM_WUFC,
-                    (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+       rtnl_lock();
+       i40e_prep_for_reset(pf);
+       rtnl_unlock();
 
 
-               i40e_clear_interrupt_scheme(pf);
-       }
+       wr32(hw, I40E_PFPM_APM,
+            (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+       wr32(hw, I40E_PFPM_WUFC,
+            (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+       i40e_clear_interrupt_scheme(pf);
 
 
+debug_mode:
        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, pf->wol_en);
                pci_set_power_state(pdev, PCI_D3hot);
        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, pf->wol_en);
                pci_set_power_state(pdev, PCI_D3hot);
@@ -11596,25 +12067,34 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        struct i40e_hw *hw = &pf->hw;
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        struct i40e_hw *hw = &pf->hw;
+       int retval = 0;
 
        set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
 
 
        set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
 
-       if (!test_bit(__I40E_DEBUG_MODE, &pf->state)) {
-               rtnl_lock();
-               i40e_prep_for_reset(pf);
-               rtnl_unlock();
+       if (test_bit(__I40E_DEBUG_MODE, &pf->state))
+               goto debug_mode;
 
 
-               wr32(hw, I40E_PFPM_APM,
-                    (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
-               wr32(hw, I40E_PFPM_WUFC,
-                    (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
-       }
+       rtnl_lock();
+       i40e_prep_for_reset(pf);
+       rtnl_unlock();
+
+       wr32(hw, I40E_PFPM_APM,
+            (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+       wr32(hw, I40E_PFPM_WUFC,
+            (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+       i40e_stop_misc_vector(pf);
+
+       retval = pci_save_state(pdev);
+       if (retval)
+               return retval;
 
 
+debug_mode:
        pci_wake_from_d3(pdev, pf->wol_en);
        pci_set_power_state(pdev, PCI_D3hot);
 
        pci_wake_from_d3(pdev, pf->wol_en);
        pci_set_power_state(pdev, PCI_D3hot);
 
-       return 0;
+       return retval;
 }
 
 /**
 }
 
 /**
@@ -11719,9 +12199,6 @@ static int __init i40e_init_module(void)
                return -ENOMEM;
        }
 
                return -ENOMEM;
        }
 
-#if IS_ENABLED(CONFIG_CONFIGFS_FS)
-       i40e_configfs_init();
-#endif /* CONFIG_CONFIGFS_FS */
        i40e_dbg_init();
        return pci_register_driver(&i40e_driver);
 }
        i40e_dbg_init();
        return pci_register_driver(&i40e_driver);
 }
@@ -11738,9 +12215,6 @@ static void __exit i40e_exit_module(void)
        pci_unregister_driver(&i40e_driver);
        destroy_workqueue(i40e_wq);
        i40e_dbg_exit();
        pci_unregister_driver(&i40e_driver);
        destroy_workqueue(i40e_wq);
        i40e_dbg_exit();
-#if IS_ENABLED(CONFIG_CONFIGFS_FS)
-       i40e_configfs_exit();
-#endif /* CONFIG_CONFIGFS_FS */
 #ifdef HAVE_KFREE_RCU_BARRIER
        rcu_barrier();
 #endif
 #ifdef HAVE_KFREE_RCU_BARRIER
        rcu_barrier();
 #endif
similarity index 87%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_nvm.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_nvm.c
index 30ceba0ad0f4001737f39ae1a2f3837e0a8d8131..28cbb69c3aa5c11d4a1aeb6f4183407721b4882d 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -46,7 +43,7 @@ i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
  * once per NVM initialization, e.g. inside the i40e_init_shared_code().
  * Please notice that the NVM term is used here (& in all methods covered
  * in this file) as an equivalent of the FLASH part mapped into the SR.
  * once per NVM initialization, e.g. inside the i40e_init_shared_code().
  * Please notice that the NVM term is used here (& in all methods covered
  * in this file) as an equivalent of the FLASH part mapped into the SR.
- * We are accessing FLASH always thru the Shadow RAM.
+ * We are accessing FLASH always through the Shadow RAM.
  **/
 i40e_status i40e_init_nvm(struct i40e_hw *hw)
 {
  **/
 i40e_status i40e_init_nvm(struct i40e_hw *hw)
 {
@@ -192,7 +189,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 }
 
 /**
 }
 
 /**
- * i40e_read_nvm_word - Reads Shadow RAM
+ * i40e_read_nvm_word - Reads an NVM word and acquires the lock if necessary
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
@@ -202,7 +199,39 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                                         u16 *data)
 {
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                                         u16 *data)
 {
-       return i40e_read_nvm_word_srctl(hw, offset, data);
+       i40e_status ret_code = I40E_SUCCESS;
+
+       if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+               ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+               if (!ret_code) {
+                       ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+                       i40e_release_nvm(hw);
+               }
+       } else {
+               ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+       }
+       return ret_code;
+}
+
+/**
+ * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM, using either the AdminQ or
+ * the GLNVM_SRCTL register depending on the hw access-method flag.
+ **/
+i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
+                                          u16 offset,
+                                          u16 *data)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+
+       if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+               ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+       else
+               ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+       return ret_code;
 }
 
 /**
 }
 
 /**
@@ -273,7 +302,31 @@ i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
 }
 
 /**
 }
 
 /**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR, using either the AdminQ or
+ * the GLNVM_SRCTL register depending on the hw access-method flag. The
+ * caller must already hold NVM ownership; no acquire/release is done here.
+ **/
+i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
+                                            u16 offset,
+                                            u16 *words, u16 *data)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+
+       if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+               ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
+       else
+               ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+       return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  * @words: (in) number of words to read; (out) number of words actually read
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  * @words: (in) number of words to read; (out) number of words actually read
@@ -286,7 +339,19 @@ i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                                           u16 *words, u16 *data)
 {
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                                           u16 *words, u16 *data)
 {
-       return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+       i40e_status ret_code = I40E_SUCCESS;
+
+       if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+               ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+               if (!ret_code) {
+                       ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+                                                        data);
+                       i40e_release_nvm(hw);
+               }
+       } else {
+               ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+       }
+       return ret_code;
 }
 
 /**
 }
 
 /**
@@ -306,7 +371,7 @@ i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
        i40e_status ret_code = I40E_SUCCESS;
        u16 index, word;
 
        i40e_status ret_code = I40E_SUCCESS;
        u16 index, word;
 
-       /* Loop thru the selected region */
+       /* Loop through the selected region */
        for (word = 0; word < *words; word++) {
                index = offset + word;
                ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
        for (word = 0; word < *words; word++) {
                index = offset + word;
                ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
@@ -472,7 +537,7 @@ i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
 }
 
 /**
 }
 
 /**
- * i40e_write_nvm_word - Writes Shadow RAM word
+ * __i40e_write_nvm_word - Writes Shadow RAM word
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to write
  * @data: word to write to the Shadow RAM
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to write
  * @data: word to write to the Shadow RAM
@@ -482,8 +547,8 @@ i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
  * reception) by caller. To commit SR to NVM update checksum function
  * should be called.
  **/
  * reception) by caller. To commit SR to NVM update checksum function
  * should be called.
  **/
-i40e_status i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
-                                         void *data)
+i40e_status __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+                                           void *data)
 {
        *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
 
 {
        *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
 
@@ -492,7 +557,7 @@ i40e_status i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
 }
 
 /**
 }
 
 /**
- * i40e_write_nvm_buffer - Writes Shadow RAM buffer
+ * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
  * @hw: pointer to the HW structure
  * @module_pointer: module pointer location in words from the NVM beginning
  * @offset: offset of the Shadow RAM buffer to write
  * @hw: pointer to the HW structure
  * @module_pointer: module pointer location in words from the NVM beginning
  * @offset: offset of the Shadow RAM buffer to write
@@ -504,9 +569,9 @@ i40e_status i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
  * on ARQ completion event reception by caller. To commit SR to NVM update
  * checksum function should be called.
  **/
  * on ARQ completion event reception by caller. To commit SR to NVM update
  * checksum function should be called.
  **/
-i40e_status i40e_write_nvm_buffer(struct i40e_hw *hw,
-                                           u8 module_pointer, u32 offset,
-                                           u16 words, void *data)
+i40e_status __i40e_write_nvm_buffer(struct i40e_hw *hw,
+                                             u8 module_pointer, u32 offset,
+                                             u16 words, void *data)
 {
        __le16 *le_word_ptr = (__le16 *)data;
        u16 *word_ptr = (u16 *)data;
 {
        __le16 *le_word_ptr = (__le16 *)data;
        u16 *word_ptr = (u16 *)data;
@@ -549,15 +614,17 @@ i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
        data = (u16 *)vmem.va;
 
        /* read pointer to VPD area */
        data = (u16 *)vmem.va;
 
        /* read pointer to VPD area */
-       ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
+       ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
+                                       &vpd_module);
        if (ret_code != I40E_SUCCESS) {
                ret_code = I40E_ERR_NVM_CHECKSUM;
                goto i40e_calc_nvm_checksum_exit;
        }
 
        /* read pointer to PCIe Alt Auto-load module */
        if (ret_code != I40E_SUCCESS) {
                ret_code = I40E_ERR_NVM_CHECKSUM;
                goto i40e_calc_nvm_checksum_exit;
        }
 
        /* read pointer to PCIe Alt Auto-load module */
-       ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
-                                     &pcie_alt_module);
+       ret_code = __i40e_read_nvm_word(hw,
+                                       I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+                                       &pcie_alt_module);
        if (ret_code != I40E_SUCCESS) {
                ret_code = I40E_ERR_NVM_CHECKSUM;
                goto i40e_calc_nvm_checksum_exit;
        if (ret_code != I40E_SUCCESS) {
                ret_code = I40E_ERR_NVM_CHECKSUM;
                goto i40e_calc_nvm_checksum_exit;
@@ -571,7 +638,7 @@ i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
                if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
                        u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
 
                if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
                        u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
 
-                       ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
+                       ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
                        if (ret_code != I40E_SUCCESS) {
                                ret_code = I40E_ERR_NVM_CHECKSUM;
                                goto i40e_calc_nvm_checksum_exit;
                        if (ret_code != I40E_SUCCESS) {
                                ret_code = I40E_ERR_NVM_CHECKSUM;
                                goto i40e_calc_nvm_checksum_exit;
@@ -642,13 +709,18 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
        u16 checksum_sr = 0;
        u16 checksum_local = 0;
 
        u16 checksum_sr = 0;
        u16 checksum_local = 0;
 
-       ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
-       if (ret_code != I40E_SUCCESS)
+       if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+               ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+       if (!ret_code) {
+               ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+               if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+                       i40e_release_nvm(hw);
+               if (ret_code != I40E_SUCCESS)
+                       goto i40e_validate_nvm_checksum_exit;
+       } else {
                goto i40e_validate_nvm_checksum_exit;
                goto i40e_validate_nvm_checksum_exit;
+       }
 
 
-       /* Do not use i40e_read_nvm_word() because we do not want to take
-        * the synchronization semaphores twice here.
-        */
        i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
 
        /* Verify read checksum from EEPROM is the same as
        i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
 
        /* Verify read checksum from EEPROM is the same as
@@ -742,10 +814,11 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
        /* early check for status command and debug msgs */
        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
        /* early check for status command and debug msgs */
        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
-       i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
+       i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
                   i40e_nvm_update_state_str[upd_cmd],
                   hw->nvmupd_state,
                   i40e_nvm_update_state_str[upd_cmd],
                   hw->nvmupd_state,
-                  hw->aq.nvm_release_on_done);
+                  hw->nvm_release_on_done, hw->nvm_wait_opcode,
+                  cmd->command, cmd->config, cmd->offset, cmd->data_size);
 
        if (upd_cmd == I40E_NVMUPD_INVALID) {
                *perrno = -EFAULT;
 
        if (upd_cmd == I40E_NVMUPD_INVALID) {
                *perrno = -EFAULT;
@@ -758,7 +831,18 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
         * going into the state machine
         */
        if (upd_cmd == I40E_NVMUPD_STATUS) {
         * going into the state machine
         */
        if (upd_cmd == I40E_NVMUPD_STATUS) {
+               if (!cmd->data_size) {
+                       *perrno = -EFAULT;
+                       return I40E_ERR_BUF_TOO_SHORT;
+               }
+
                bytes[0] = hw->nvmupd_state;
                bytes[0] = hw->nvmupd_state;
+
+               if (cmd->data_size >= 4) {
+                       bytes[1] = 0;
+                       *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+               }
+
                return I40E_SUCCESS;
        }
 
                return I40E_SUCCESS;
        }
 
@@ -777,6 +861,14 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 
        case I40E_NVMUPD_STATE_INIT_WAIT:
        case I40E_NVMUPD_STATE_WRITE_WAIT:
 
        case I40E_NVMUPD_STATE_INIT_WAIT:
        case I40E_NVMUPD_STATE_WRITE_WAIT:
+               /* if we need to stop waiting for an event, clear
+                * the wait info and return before doing anything else
+                */
+               if (cmd->offset == 0xffff) {
+                       i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+                       return I40E_SUCCESS;
+               }
+
                status = I40E_ERR_NOT_READY;
                *perrno = -EBUSY;
                break;
                status = I40E_ERR_NOT_READY;
                *perrno = -EBUSY;
                break;
@@ -847,7 +939,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                        if (status) {
                                i40e_release_nvm(hw);
                        } else {
                        if (status) {
                                i40e_release_nvm(hw);
                        } else {
-                               hw->aq.nvm_release_on_done = true;
+                               hw->nvm_release_on_done = true;
+                               hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                        }
                }
                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                        }
                }
@@ -863,7 +956,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                        if (status) {
                                i40e_release_nvm(hw);
                        } else {
                        if (status) {
                                i40e_release_nvm(hw);
                        } else {
-                               hw->aq.nvm_release_on_done = true;
+                               hw->nvm_release_on_done = true;
+                               hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                        }
                }
                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                        }
                }
@@ -876,10 +970,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                                     hw->aq.asq_last_status);
                } else {
                        status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
                                                     hw->aq.asq_last_status);
                } else {
                        status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-                       if (status)
+                       if (status) {
                                i40e_release_nvm(hw);
                                i40e_release_nvm(hw);
-                       else
+                       } else {
+                               hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                                hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
                                hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+                       }
                }
                break;
 
                }
                break;
 
@@ -897,7 +993,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                   -EIO;
                                i40e_release_nvm(hw);
                        } else {
                                   -EIO;
                                i40e_release_nvm(hw);
                        } else {
-                               hw->aq.nvm_release_on_done = true;
+                               hw->nvm_release_on_done = true;
+                               hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                        }
                }
                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                        }
                }
@@ -988,8 +1085,10 @@ retry:
        switch (upd_cmd) {
        case I40E_NVMUPD_WRITE_CON:
                status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
        switch (upd_cmd) {
        case I40E_NVMUPD_WRITE_CON:
                status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-               if (!status)
+               if (!status) {
+                       hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+               }
                break;
 
        case I40E_NVMUPD_WRITE_LCB:
                break;
 
        case I40E_NVMUPD_WRITE_LCB:
@@ -1001,12 +1100,14 @@ retry:
                                   -EIO;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                } else {
                                   -EIO;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                } else {
-                       hw->aq.nvm_release_on_done = true;
+                       hw->nvm_release_on_done = true;
+                       hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                }
                break;
 
        case I40E_NVMUPD_CSUM_CON:
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                }
                break;
 
        case I40E_NVMUPD_CSUM_CON:
+               /* Assumes the caller has acquired the nvm */
                status = i40e_update_nvm_checksum(hw);
                if (status) {
                        *perrno = hw->aq.asq_last_status ?
                status = i40e_update_nvm_checksum(hw);
                if (status) {
                        *perrno = hw->aq.asq_last_status ?
@@ -1015,11 +1116,13 @@ retry:
                                   -EIO;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                } else {
                                   -EIO;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                } else {
+                       hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
                }
                break;
 
        case I40E_NVMUPD_CSUM_LCB:
                        hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
                }
                break;
 
        case I40E_NVMUPD_CSUM_LCB:
+               /* Assumes the caller has acquired the nvm */
                status = i40e_update_nvm_checksum(hw);
                if (status) {
                        *perrno = hw->aq.asq_last_status ?
                status = i40e_update_nvm_checksum(hw);
                if (status) {
                        *perrno = hw->aq.asq_last_status ?
@@ -1028,7 +1131,8 @@ retry:
                                   -EIO;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                } else {
                                   -EIO;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                } else {
-                       hw->aq.nvm_release_on_done = true;
+                       hw->nvm_release_on_done = true;
+                       hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                }
                break;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                }
                break;
@@ -1077,6 +1181,38 @@ retry:
        return status;
 }
 
        return status;
 }
 
+/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+{
+       if (opcode == hw->nvm_wait_opcode) {
+
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
+               if (hw->nvm_release_on_done) {
+                       i40e_release_nvm(hw);
+                       hw->nvm_release_on_done = false;
+               }
+               hw->nvm_wait_opcode = 0;
+
+               switch (hw->nvmupd_state) {
+               case I40E_NVMUPD_STATE_INIT_WAIT:
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+                       break;
+
+               case I40E_NVMUPD_STATE_WRITE_WAIT:
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+                       break;
+
+               default:
+                       break;
+               }
+       }
+}
+
 /**
  * i40e_nvmupd_validate_command - Validate given command
  * @hw: pointer to hardware structure
 /**
  * i40e_nvmupd_validate_command - Validate given command
  * @hw: pointer to hardware structure
@@ -1237,6 +1373,12 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
                *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
        }
 
                *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
        }
 
+       /* should we wait for a followup event? */
+       if (cmd->offset) {
+               hw->nvm_wait_opcode = cmd->offset;
+               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+       }
+
        return status;
 }
 
        return status;
 }
 
@@ -1295,7 +1437,7 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
                remainder -= len;
                buff = hw->nvm_buff.va;
        } else {
                remainder -= len;
                buff = hw->nvm_buff.va;
        } else {
-               buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+               buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
        }
 
        if (remainder > 0) {
        }
 
        if (remainder > 0) {
similarity index 82%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_osdep.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_osdep.h
index 0a515d2cb2fbc525a0aff33f78fac5f0167b8f59..8195d9699903a69de118800f1bc3eaa0010841b3 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -63,9 +60,12 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
  * actual OS primitives
  */
 
  * actual OS primitives
  */
 
-#undef ASSERT
+#define hw_dbg(h, s, ...) do {                         \
+               pr_debug("i40e %02x.%x " s,                     \
+                       (h)->bus.device, (h)->bus.func,         \
+                       ##__VA_ARGS__);                         \
+} while (0)
 
 
-#define hw_dbg(hw, S, A...)    do {} while (0)
 
 #define wr32(a, reg, value)    writel((value), ((a)->hw_addr + (reg)))
 #define rd32(a, reg)           readl((a)->hw_addr + (reg))
 
 #define wr32(a, reg, value)    writel((value), ((a)->hw_addr + (reg)))
 #define rd32(a, reg)           readl((a)->hw_addr + (reg))
@@ -81,7 +81,8 @@ struct i40e_dma_mem {
 } __packed;
 
 #define i40e_allocate_dma_mem(h, m, unused, s, a) \
 } __packed;
 
 #define i40e_allocate_dma_mem(h, m, unused, s, a) \
-                       i40e_allocate_dma_mem_d(h, m, s, a)
+                       i40e_allocate_dma_mem_d(h, m, unused, s, a)
+
 #define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
 
 struct i40e_virt_mem {
 #define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
 
 struct i40e_virt_mem {
@@ -116,10 +117,20 @@ struct i40e_spinlock {
        struct mutex spinlock;
 };
 
        struct mutex spinlock;
 };
 
-#define i40e_init_spinlock(_sp) i40e_init_spinlock_d(_sp)
+static inline void i40e_no_action(struct i40e_spinlock *sp)
+{
+       /* nothing */
+}
+
+/* the locks are initialized in _probe and destroyed in _remove
+ * so make sure NOT to implement init/destroy here, as to
+ * avoid the i40e_init_adminq code trying to reinitialize
+ * the persistent lock memory
+ */
+#define i40e_init_spinlock(_sp)    i40e_no_action(_sp)
 #define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp)
 #define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp)
 #define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp)
 #define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp)
-#define i40e_destroy_spinlock(_sp) i40e_destroy_spinlock_d(_sp)
+#define i40e_destroy_spinlock(_sp) i40e_no_action(_sp)
 
 #define I40E_HTONL(a)          htonl(a)
 
 
 #define I40E_HTONL(a)          htonl(a)
 
@@ -128,6 +139,8 @@ struct i40e_spinlock {
 
 typedef enum i40e_status_code i40e_status;
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 
 typedef enum i40e_status_code i40e_status;
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifdef WITH_FCOE
 #define I40E_FCOE
 #define I40E_FCOE
+#endif
 #endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
 #endif /* _I40E_OSDEP_H_ */
 #endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
 #endif /* _I40E_OSDEP_H_ */
similarity index 85%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_prototype.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_prototype.h
index d1fff9bdc0ed260d4e11e570447f0faecb3c6a78..123a27b99b62076731084fe06db2027ee2180968 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -60,21 +57,36 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details);
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details);
-bool i40e_asq_done(struct i40e_hw *hw);
 
 /* debug function for adminq */
 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
                   void *desc, void *buffer, u16 buf_len);
 
 void i40e_idle_aq(struct i40e_hw *hw);
 
 /* debug function for adminq */
 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
                   void *desc, void *buffer, u16 buf_len);
 
 void i40e_idle_aq(struct i40e_hw *hw);
-void i40e_resume_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+                                         bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+                                         bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+                                    u16 seid,
+                                    struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+                                    u16 seid,
+                                    struct i40e_aqc_get_set_rss_key_data *key);
 const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
 const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
 const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
 const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
+                                      u16 led_addr, u32 mode);
+i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+                                      u16 *val);
+i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+                                             u32 time, u32 interval);
 
 /* admin send queue commands */
 
 
 /* admin send queue commands */
 
@@ -135,7 +147,8 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
                                u16 vsi_id, bool set_filter,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
                                u16 vsi_id, bool set_filter,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
-               u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+               u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+               bool rx_only_promisc);
 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
                u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
                u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
@@ -144,6 +157,9 @@ i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
 i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
                                u16 seid, bool enable, u16 vid,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
                                u16 seid, bool enable, u16 vid,
                                struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+                               u16 seid, bool enable,
+                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
                                struct i40e_vsi_context *vsi_ctx,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
                                struct i40e_vsi_context *vsi_ctx,
                                struct i40e_asq_cmd_details *cmd_details);
@@ -152,8 +168,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
                                u16 downlink_seid, u8 enabled_tc,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
                                u16 downlink_seid, u8 enabled_tc,
-                               bool default_port, bool enable_l2_filtering,
-                               u16 *pveb_seid,
+                               bool default_port, u16 *pveb_seid,
+                               bool enable_stats,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
                                u16 veb_seid, u16 *switch_id, bool *floating,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
                                u16 veb_seid, u16 *switch_id, bool *floating,
@@ -166,6 +182,15 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
                        struct i40e_aqc_remove_macvlan_element_data *mv_list,
                        u16 count, struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
                        struct i40e_aqc_remove_macvlan_element_data *mv_list,
                        u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+                       u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+                       struct i40e_asq_cmd_details *cmd_details,
+                       u16 *rule_id, u16 *rules_used, u16 *rules_free);
+i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+                       u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+                       struct i40e_asq_cmd_details *cmd_details,
+                       u16 *rules_used, u16 *rules_free);
+
 i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
                        struct i40e_aqc_add_remove_vlan_element_data *v_list,
                        u8 count, struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
                        struct i40e_aqc_add_remove_vlan_element_data *v_list,
                        u8 count, struct i40e_asq_cmd_details *cmd_details);
@@ -179,6 +204,9 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
                                struct i40e_aqc_get_switch_config_resp *buf,
                                u16 buf_size, u16 *start_seid,
                                struct i40e_asq_cmd_details *cmd_details);
                                struct i40e_aqc_get_switch_config_resp *buf,
                                u16 buf_size, u16 *start_seid,
                                struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_switch_config(struct i40e_hw *hw,
+                               u16 flags, u16 valid_flags,
+                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
                                enum i40e_aq_resources_ids resource,
                                enum i40e_aq_resource_access_type access,
 i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
                                enum i40e_aq_resources_ids resource,
                                enum i40e_aq_resource_access_type access,
@@ -300,10 +328,6 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
 i40e_status i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
                                u8 tcmap, bool request, u8 *tcmap_ret,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
                                u8 tcmap, bool request, u8 *tcmap_ret,
                                struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
-                               enum i40e_aq_hmc_profile *profile,
-                               u8 *pe_vf_enabled_count,
-                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_config_switch_comp_ets_bw_limit(
        struct i40e_hw *hw, u16 seid,
        struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
 i40e_status i40e_aq_config_switch_comp_ets_bw_limit(
        struct i40e_hw *hw, u16 seid,
        struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
@@ -314,10 +338,6 @@ i40e_status i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
                        struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
                        struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
-                               enum i40e_aq_hmc_profile profile,
-                               u8 pe_vf_enabled_count,
-                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
                                u16 seid, u16 credit, u8 max_bw,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
                                u16 seid, u16 credit, u8 max_bw,
                                struct i40e_asq_cmd_details *cmd_details);
@@ -366,7 +386,6 @@ i40e_status i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
                u16 vsi,
                struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
                u8 filter_count);
                u16 vsi,
                struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
                u8 filter_count);
-
 i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
                                u32 reg_addr0, u32 *reg_val0,
                                u32 reg_addr1, u32 *reg_val1);
 i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
                                u32 reg_addr0, u32 *reg_val0,
                                u32 reg_addr1, u32 *reg_val1);
@@ -416,9 +435,13 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
                                        u32 offset, u16 words, void *data,
                                        bool last_command);
 i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
                                        u32 offset, u16 words, void *data,
                                        bool last_command);
-i40e_status i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+                                          u16 *data);
+i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+                                            u16 *words, u16 *data);
+i40e_status __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
                                          void *data);
                                          void *data);
-i40e_status i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
+i40e_status __i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
                                            u32 offset, u16 words, void *data);
 i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
                                            u32 offset, u16 words, void *data);
 i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
@@ -427,6 +450,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
                                          struct i40e_nvm_access *cmd,
                                          u8 *bytes, int *);
 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
                                          struct i40e_nvm_access *cmd,
                                          u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
 
 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
 
 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
@@ -465,4 +489,34 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
                                struct i40e_asq_cmd_details *cmd_details);
 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
                                                    u16 vsi_seid);
                                struct i40e_asq_cmd_details *cmd_details);
 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
                                                    u16 vsi_seid);
+i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+                               u32 reg_addr, u32 *reg_val,
+                               struct i40e_asq_cmd_details *cmd_details);
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
+i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+                               u32 reg_addr, u32 reg_val,
+                               struct i40e_asq_cmd_details *cmd_details);
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+i40e_status i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+                       struct i40e_aqc_arp_proxy_data *proxy_config,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+                       struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+                       u8 filter_index,
+                       struct i40e_aqc_set_wol_filter_data *filter,
+                       bool set_filter, bool no_wol_tco,
+                       bool filter_valid, bool no_wol_tco_valid,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+                       u16 *wake_reason,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
+                                            u16 reg, u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
+                                             u16 reg, u8 phy_addr, u16 value);
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
+i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+                                             u32 time, u32 interval);
 #endif /* _I40E_PROTOTYPE_H_ */
 #endif /* _I40E_PROTOTYPE_H_ */
similarity index 98%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_ptp.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_ptp.c
index a6dce3ee880d82e721e21715e047262b7e687d50..3f3cdb250c314163d55ab1c225080e6d4ba0e390 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  *
  ******************************************************************************/
 
  *
  ******************************************************************************/
 
+/* this lets the macros that return timespec64 or structs compile cleanly with
+ * W=2
+ */
+#pragma GCC diagnostic ignored "-Waggregate-return"
 #include "i40e.h"
 #ifdef HAVE_PTP_1588_CLOCK
 #include <linux/ptp_classify.h>
 #include "i40e.h"
 #ifdef HAVE_PTP_1588_CLOCK
 #include <linux/ptp_classify.h>
@@ -159,13 +160,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-       struct timespec64 now, then = ns_to_timespec64(delta);
+       struct timespec64 now;
        unsigned long flags;
 
        spin_lock_irqsave(&pf->tmreg_lock, flags);
 
        i40e_ptp_read(pf, &now);
        unsigned long flags;
 
        spin_lock_irqsave(&pf->tmreg_lock, flags);
 
        i40e_ptp_read(pf, &now);
-       now = timespec_add(now, then);
+       timespec64_add_ns(&now, delta);
        i40e_ptp_write(pf, (const struct timespec64 *)&now);
 
        spin_unlock_irqrestore(&pf->tmreg_lock, flags);
        i40e_ptp_write(pf, (const struct timespec64 *)&now);
 
        spin_unlock_irqrestore(&pf->tmreg_lock, flags);
@@ -361,6 +362,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
        skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
        dev_kfree_skb_any(pf->ptp_tx_skb);
        pf->ptp_tx_skb = NULL;
        skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
        dev_kfree_skb_any(pf->ptp_tx_skb);
        pf->ptp_tx_skb = NULL;
+       clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
 }
 
 /**
 }
 
 /**
@@ -723,13 +725,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
                i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
 
                /* Set the clock value. */
                i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
 
                /* Set the clock value. */
-#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
                ts = ktime_to_timespec64(ktime_get_real());
                i40e_ptp_settime64(&pf->ptp_caps, &ts);
                ts = ktime_to_timespec64(ktime_get_real());
                i40e_ptp_settime64(&pf->ptp_caps, &ts);
-#else
-               ts = ktime_to_timespec(ktime_get_real());
-               i40e_ptp_settime(&pf->ptp_caps, &ts);
-#endif
        }
 }
 
        }
 }
 
@@ -749,6 +746,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
        if (pf->ptp_tx_skb) {
                dev_kfree_skb_any(pf->ptp_tx_skb);
                pf->ptp_tx_skb = NULL;
        if (pf->ptp_tx_skb) {
                dev_kfree_skb_any(pf->ptp_tx_skb);
                pf->ptp_tx_skb = NULL;
+               clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
        }
 
        if (pf->ptp_clock) {
        }
 
        if (pf->ptp_clock) {
similarity index 60%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_register.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_register.h
index 00abf0f8629726fe0f5bd4e990ab85cb9b985098..3011e4529e0ff73beb3ffd877cda3074ed2a081b 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
 #define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
 #define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
 #define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
 #define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
 #define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
 #define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GL_PRS_FVBM_MAX_INDEX 3
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31
+#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT)
 #define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
 #define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
 #define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
 #define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
 #define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
 #define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
 #define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
 #define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
 #define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
 #define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
 #define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
 #define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
 #define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_MSK_MAX_INDEX 63
 #define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
 #define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_MSK_MAX_INDEX 63
 #define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+/* Register/field definitions: each field has a _SHIFT constant and a _MASK built
+ * as I40E_MASK(field_width_bits, shift). Register offsets carry their reset
+ * domain (CORER/POR/GLOBR/VFR) in a trailing comment; array registers note
+ * their valid index range. Values are hardware-defined — do not hand-edit.
+ */
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+/* GLHMC_*: PF-indexed object base/count/max registers (arrays indexed _i=0...15).
+ * NOTE(review): presumably HMC = Host Memory Cache resource layout — confirm
+ * against the X710/XL710 datasheet.
+ */
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+/* GLHMC_VF*: VF-indexed counterparts of the registers above (arrays indexed _i=0...31). */
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+/* GL*OC_CACHESIZE: cache geometry registers, all with the same WORD_SIZE/SETS/WAYS layout. */
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+/* GLGEN_CAR_DEBUG: single-bit clock-enable and reset-line status fields (bits 0-14). */
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+/* WB_ON_ITR: bit 30 of the PF/VF interrupt dynamic-control registers. */
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+/* GLNVM_*: NVM autoload request/status registers (per-reset-domain bit fields). */
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+/* GLNVM_ULD: per-unit autoload-done status bits. NOTE(review): bits 2, 6 and 7
+ * are intentionally skipped here — presumably reserved; confirm against datasheet.
+ */
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+/* MEM_INIT_DONE_STAT: per-block memory-initialization-done status bits. */
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+/*
+ * Auto-generated i40e register map fragment: absolute register offsets
+ * plus per-field SHIFT/MASK pairs built with I40E_MASK().  All values
+ * are hardware-defined constants -- do not hand-edit; regenerate from
+ * the device register specification if they ever need to change.
+ */
+/* PF protocol-engine (PFPE) registers; reset domain: PFR */
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+/* Per-port DCB RLPM registers; reset domain: PE_CORER */
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+/* Per-port RUPM traffic-class counters and control; reset domain: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+/* Per-VF protocol-engine (VFPE) registers, indexed by VF 0..127; reset domain: VFR */
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+/*
+ * Global PE statistics (GLPES) per-PF counters, indexed 0..15; reset
+ * domain: PE_CORER.  64-bit counters are split into LO (low 32 bits)
+ * and HI (high 16 bits) register pairs at adjacent offsets.
+ */
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+/* NOTE(review): the RDMATX* registers below reuse RDMARX* field names;
+ * this matches the vendor-generated naming, so it is left unchanged. */
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_FD_MSK_MAX_INDEX 1
+#define I40E_GLQF_FD_MSK_MASK_SHIFT 0
+#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT)
+#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_INSET_MAX_INDEX 1
+#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0
+#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT)
+#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_MSK_MAX_INDEX 1
+#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0
+#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT)
+#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_ORT_MAX_INDEX 63
+#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
+#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
+#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
+#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
+#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
+#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */
+#define I40E_GLQF_PIT_MAX_INDEX 23
+#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+#define I40E_GLQF_PIT_FSIZE_SHIFT 5
+#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT)
+#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10
+#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
 #endif /* _I40E_REGISTER_H_ */
 #endif /* _I40E_REGISTER_H_ */
similarity index 92%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_status.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_status.h
index 9e1f25c2168df0a62b2cf2005654fa2f8605f656..083b00292686358ddacb79a5150e4ee41ac38743 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
similarity index 61%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_txrx.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_txrx.c
index 0ecb43db978683c2a5fa6e3fbf9fa3e0fb604858..458624cd4e62440a51e7e5933146c5fb6b45a467 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -102,7 +99,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
 
        fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
               I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
 
        fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
               I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
-
        fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
               I40E_TXD_FLTR_QW0_PCTYPE_MASK;
 
        fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
               I40E_TXD_FLTR_QW0_PCTYPE_MASK;
 
@@ -180,7 +176,6 @@ dma_fail:
 }
 
 #define IP_HEADER_OFFSET 14
 }
 
 #define IP_HEADER_OFFSET 14
-#define I40E_UDPIP_DUMMY_PACKET_LEN 42
 /**
  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
  * @vsi: pointer to the targeted VSI
 /**
  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
  * @vsi: pointer to the targeted VSI
@@ -198,6 +193,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
        struct iphdr *ip;
        bool err = false;
        u8 *raw_packet;
        struct iphdr *ip;
        bool err = false;
        u8 *raw_packet;
+       u16 off;
        int ret;
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
                0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
        int ret;
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
                0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
@@ -217,6 +213,12 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
        ip->saddr = fd_data->src_ip[0];
        udp->source = fd_data->src_port;
 
        ip->saddr = fd_data->src_ip[0];
        udp->source = fd_data->src_port;
 
+       /* For now, supports only word as flex payload */
+       if (i40e_is_flex_filter(fd_data)) {
+               off = ~(be16_to_cpu(fd_data->flex_mask[3]));
+               *((u16 *)(raw_packet + off)) = fd_data->flex_bytes[3];
+       }
+
        fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
        ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
        if (ret) {
        fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
        ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
        if (ret) {
@@ -234,10 +236,23 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
                                 "Filter deleted for PCTYPE %d loc = %d\n",
                                 fd_data->pctype, fd_data->fd_id);
        }
                                 "Filter deleted for PCTYPE %d loc = %d\n",
                                 fd_data->pctype, fd_data->fd_id);
        }
+       if (err) {
+               kfree(raw_packet);
+       } else {
+               if (add) {
+                       pf->fd_udp4_filter_cnt++;
+                       if (i40e_is_flex_filter(fd_data))
+                               pf->fd_flex_filter_cnt++;
+               } else {
+                       pf->fd_udp4_filter_cnt--;
+                       if (i40e_is_flex_filter(fd_data))
+                               pf->fd_flex_filter_cnt--;
+               }
+       }
+
        return err ? -EOPNOTSUPP : 0;
 }
 
        return err ? -EOPNOTSUPP : 0;
 }
 
-#define I40E_TCPIP_DUMMY_PACKET_LEN 54
 /**
  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
  * @vsi: pointer to the targeted VSI
 /**
  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
  * @vsi: pointer to the targeted VSI
@@ -255,11 +270,12 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
        struct iphdr *ip;
        bool err = false;
        u8 *raw_packet;
        struct iphdr *ip;
        bool err = false;
        u8 *raw_packet;
+       u16 off;
        int ret;
        /* Dummy packet */
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
                0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
        int ret;
        /* Dummy packet */
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
                0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
-               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
+               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x11,
                0x0, 0x72, 0, 0, 0, 0};
 
        raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
                0x0, 0x72, 0, 0, 0, 0};
 
        raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
@@ -276,6 +292,12 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
        ip->saddr = fd_data->src_ip[0];
        tcp->source = fd_data->src_port;
 
        ip->saddr = fd_data->src_ip[0];
        tcp->source = fd_data->src_port;
 
+       /* For now, supports only word as flex payload */
+       if (i40e_is_flex_filter(fd_data)) {
+               off = ~(be16_to_cpu(fd_data->flex_mask[3]));
+               *((u16 *)(raw_packet + off)) = fd_data->flex_bytes[3];
+       }
+
        if (add) {
                pf->fd_tcp_rule++;
                if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
        if (add) {
                pf->fd_tcp_rule++;
                if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
@@ -311,6 +333,20 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                                 fd_data->pctype, fd_data->fd_id);
        }
 
                                 fd_data->pctype, fd_data->fd_id);
        }
 
+       if (err) {
+               kfree(raw_packet);
+       } else {
+               if (add) {
+                       pf->fd_tcp4_filter_cnt++;
+                       if (i40e_is_flex_filter(fd_data))
+                               pf->fd_flex_filter_cnt++;
+               } else {
+                       pf->fd_tcp4_filter_cnt--;
+                       if (i40e_is_flex_filter(fd_data))
+                               pf->fd_flex_filter_cnt--;
+               }
+       }
+
        return err ? -EOPNOTSUPP : 0;
 }
 
        return err ? -EOPNOTSUPP : 0;
 }
 
@@ -330,7 +366,6 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
        return -EOPNOTSUPP;
 }
 
        return -EOPNOTSUPP;
 }
 
-#define I40E_IP_DUMMY_PACKET_LEN 34
 /**
  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
  * a specific flow spec
 /**
  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
  * a specific flow spec
@@ -348,6 +383,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
        struct iphdr *ip;
        bool err = false;
        u8 *raw_packet;
        struct iphdr *ip;
        bool err = false;
        u8 *raw_packet;
+       u16 off;
        int ret;
        int i;
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
        int ret;
        int i;
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
@@ -366,6 +402,12 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
                ip->daddr = fd_data->dst_ip[0];
                ip->protocol = 0;
 
                ip->daddr = fd_data->dst_ip[0];
                ip->protocol = 0;
 
+               /* For now, supports only word as flex payload */
+               if (i40e_is_flex_filter(fd_data)) {
+                       off = ~(be16_to_cpu(fd_data->flex_mask[3]));
+                       *((u16 *)(raw_packet + off)) = fd_data->flex_bytes[3];
+               }
+
                fd_data->pctype = i;
                ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 
                fd_data->pctype = i;
                ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 
@@ -386,6 +428,20 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
                }
        }
 
                }
        }
 
+       if (err) {
+               kfree(raw_packet);
+       } else {
+               if (add) {
+                       pf->fd_ip4_filter_cnt++;
+                       if (fd_data->flex_bytes[3] && fd_data->flex_mask[3])
+                               pf->fd_flex_filter_cnt++;
+               } else {
+                       pf->fd_ip4_filter_cnt--;
+                       if (fd_data->flex_bytes[3] && fd_data->flex_mask[3])
+                               pf->fd_flex_filter_cnt--;
+               }
+       }
+
        return err ? -EOPNOTSUPP : 0;
 }
 
        return err ? -EOPNOTSUPP : 0;
 }
 
@@ -402,7 +458,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
        struct i40e_pf *pf = vsi->back;
        int ret;
 
        struct i40e_pf *pf = vsi->back;
        int ret;
 
-       switch (input->flow_type & ~FLOW_EXT) {
+       switch (input->flow_type & FLOW_TYPE_MASK) {
        case TCP_V4_FLOW:
                ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
                break;
        case TCP_V4_FLOW:
                ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
                break;
@@ -432,7 +488,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
                }
                break;
        default:
                }
                break;
        default:
-               dev_info(&pf->pdev->dev, "Could not specify spec type %d",
+               dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
                         input->flow_type);
                ret = -EINVAL;
        }
                         input->flow_type);
                ret = -EINVAL;
        }
@@ -476,7 +532,6 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                 * progress do nothing, once flush is complete the state will
                 * be cleared.
                 */
                 * progress do nothing, once flush is complete the state will
                 * be cleared.
                 */
-
                if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
                        return;
 
                if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
                        return;
 
@@ -486,8 +541,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 
                if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
 
                if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
-                       pf->auto_disable_flags |=
-                                               I40E_FLAG_FD_ATR_ENABLED;
+                       pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
                        set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
                }
 
                        set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
                }
 
@@ -500,7 +554,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                 */
                if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
                        if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                 */
                if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
                        if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
-                         !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+                           !(pf->auto_disable_flags &
+                                    I40E_FLAG_FD_SB_ENABLED)) {
                                if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                        dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
                                pf->auto_disable_flags |=
                                if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                        dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
                                pf->auto_disable_flags |=
@@ -523,11 +578,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
                                            struct i40e_tx_buffer *tx_buffer)
 {
        if (tx_buffer->skb) {
                                            struct i40e_tx_buffer *tx_buffer)
 {
        if (tx_buffer->skb) {
-               if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
-                       kfree(tx_buffer->raw_buf);
-               else
-                       dev_kfree_skb_any(tx_buffer->skb);
-
+               dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
@@ -539,6 +590,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
+
+       if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+               kfree(tx_buffer->raw_buf);
+
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
@@ -601,15 +656,19 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
+ * @in_sw: is tx_pending being checked in SW or HW
  *
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
  **/
  *
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
  **/
-u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 {
        u32 head, tail;
 
 {
        u32 head, tail;
 
-       head = i40e_get_head(ring);
+       if (!in_sw)
+               head = i40e_get_head(ring);
+       else
+               head = ring->next_to_clean;
        tail = readl(ring->tail);
 
        if (head != tail)
        tail = readl(ring->tail);
 
        if (head != tail)
@@ -619,50 +678,6 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring)
        return 0;
 }
 
        return 0;
 }
 
-/**
- * i40e_check_tx_hang - Is there a hang in the Tx queue
- * @tx_ring: the ring of descriptors
- **/
-static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
-{
-       u32 tx_done = tx_ring->stats.packets;
-       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
-       u32 tx_pending = i40e_get_tx_pending(tx_ring);
-       struct i40e_pf *pf = tx_ring->vsi->back;
-       bool ret = false;
-
-       clear_check_for_tx_hang(tx_ring);
-
-       /* Check for a hung queue, but be thorough. This verifies
-        * that a transmit has been completed since the previous
-        * check AND there is at least one packet pending. The
-        * ARMED bit is set to indicate a potential hang. The
-        * bit is cleared if a pause frame is received to remove
-        * false hang detection due to PFC or 802.3x frames. By
-        * requiring this to fail twice we avoid races with
-        * PFC clearing the ARMED bit and conditions where we
-        * run the check_tx_hang logic with a transmit completion
-        * pending but without time to complete it yet.
-        */
-       if ((tx_done_old == tx_done) && tx_pending) {
-               /* make sure it is true for two checks in a row */
-               ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
-                                      &tx_ring->state);
-       } else if (tx_done_old == tx_done &&
-                  (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
-               if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
-                       dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
-                                tx_pending, tx_ring->queue_index);
-               pf->tx_sluggish_count++;
-       } else {
-               /* update completed stats and disarm the hang check */
-               tx_ring->tx_stats.tx_done_old = tx_done;
-               clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
-       }
-
-       return ret;
-}
-
 #define WB_STRIDE 0x3
 
 /**
 #define WB_STRIDE 0x3
 
 /**
@@ -709,7 +724,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                total_packets += tx_buf->gso_segs;
 
                /* free the skb */
                total_packets += tx_buf->gso_segs;
 
                /* free the skb */
-               dev_kfree_skb_any(tx_buf->skb);
+               dev_consume_skb_any(tx_buf->skb);
 
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
 
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
@@ -768,37 +783,28 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
 
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
 
-       if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
-               /* schedule immediate reset if we believe we hung */
-               dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
-                        "  VSI                  <%d>\n"
-                        "  Tx Queue             <%d>\n"
-                        "  next_to_use          <%x>\n"
-                        "  next_to_clean        <%x>\n",
-                        tx_ring->vsi->seid,
-                        tx_ring->queue_index,
-                        tx_ring->next_to_use, i);
-
-               netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
-               dev_info(tx_ring->dev,
-                        "tx hang detected on queue %d, reset requested\n",
-                        tx_ring->queue_index);
-
-               /* do not fire the reset immediately, wait for the stack to
-                * decide we are truly stuck, also prevents every queue from
-                * simultaneously requesting a reset
+       if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+               unsigned int j = 0;
+
+               /* check to see if there are < 4 descriptors
+                * waiting to be written back, then kick the hardware to force
+                * them to be written back in case we stay in NAPI.
+                * In this mode on X722 we do not enable Interrupt.
                 */
                 */
+               j = i40e_get_tx_pending(tx_ring, false);
 
 
-               /* the adapter is about to reset, no point in enabling polling */
-               budget = 1;
+               if (budget &&
+                   ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
+                   !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+                   (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+                       tx_ring->arm_wb = true;
        }
 
        netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                      tx_ring->queue_index),
                                  total_packets, total_bytes);
 
        }
 
        netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                      tx_ring->queue_index),
                                  total_packets, total_bytes);
 
-#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
@@ -818,7 +824,41 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 }
 
 /**
 }
 
 /**
- * i40e_force_wb -Arm hardware to do a wb on noncache aligned descriptors
+ * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to enable writeback
+ *
+ **/
+static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
+                                 struct i40e_q_vector *q_vector)
+{
+       u16 flags = q_vector->tx.ring[0].flags;
+       u32 val;
+
+       if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
+               return;
+
+       if (q_vector->arm_wb_state)
+               return;
+
+       if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+               val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
+                     I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
+
+               wr32(&vsi->back->hw,
+                   I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+                   val);
+       } else {
+               val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
+                     I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
+
+               wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+       }
+       q_vector->arm_wb_state = true;
+}
+
+/**
+ * i40e_force_wb - Issue SW Interrupt so HW does a wb
  * @vsi: the VSI we care about
  * @q_vector: the vector  on which to force writeback
  *
  * @vsi: the VSI we care about
  * @q_vector: the vector  on which to force writeback
  *
@@ -829,22 +869,20 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
                u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
                          I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
                          I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
                u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
                          I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
                          I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
-                         I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
-                         /* allow 00 to be written to the index */;
+                         I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+                         /* allow 00 to be written to the index */
 
                wr32(&vsi->back->hw,
                     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
 
                wr32(&vsi->back->hw,
                     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
-                                        vsi->base_vector - 1),
-                    val);
+                                        vsi->base_vector - 1), val);
        } else {
                u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
                          I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
                          I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
        } else {
                u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
                          I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
                          I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
-                         I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK
-                         /* allow 00 to be written to the index */;
+                         I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
+                       /* allow 00 to be written to the index */
 
                wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
 
                wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
-
        }
 }
 
        }
 }
 
@@ -852,7 +890,7 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
  * i40e_set_new_dynamic_itr - Find new ITR level
  * @rc: structure containing ring performance data
  *
  * i40e_set_new_dynamic_itr - Find new ITR level
  * @rc: structure containing ring performance data
  *
- * Returns true if itr changed, false if not
+ * Returns true if ITR changed, false if not
  *
  * Stores a new ITR value based on packets and byte counts during
  * the last interrupt.  The advantage of per interrupt computation
  *
  * Stores a new ITR value based on packets and byte counts during
  * the last interrupt.  The advantage of per interrupt computation
@@ -877,7 +915,7 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
         *   0-10MB/s   lowest (50000 ints/s)
         *  10-20MB/s   low    (20000 ints/s)
         *  20-1249MB/s bulk   (18000 ints/s)
         *   0-10MB/s   lowest (50000 ints/s)
         *  10-20MB/s   low    (20000 ints/s)
         *  20-1249MB/s bulk   (18000 ints/s)
-        *  > 40000 rx packets per second (8000 ints/s)
+        *  > 40000 Rx packets per second (8000 ints/s)
         *
         * The math works out because the divisor is in 10^(-6) which
         * turns the bytes/us input value into MB/s values, but
         *
         * The math works out because the divisor is in 10^(-6) which
         * turns the bytes/us input value into MB/s values, but
@@ -1030,7 +1068,6 @@ err:
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
-       struct i40e_rx_buffer *rx_bi;
        unsigned long bi_size;
        u16 i;
 
        unsigned long bi_size;
        u16 i;
 
@@ -1038,48 +1075,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
        if (!rx_ring->rx_bi)
                return;
 
        if (!rx_ring->rx_bi)
                return;
 
-       if (ring_is_ps_enabled(rx_ring)) {
-               int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
-               rx_bi = &rx_ring->rx_bi[0];
-               if (rx_bi->hdr_buf) {
-                       dma_free_coherent(dev,
-                                         bufsz,
-                                         rx_bi->hdr_buf,
-                                         rx_bi->dma);
-                       for (i = 0; i < rx_ring->count; i++) {
-                               rx_bi = &rx_ring->rx_bi[i];
-                               rx_bi->dma = 0;
-                               rx_bi->hdr_buf = NULL;
-                       }
-               }
-       }
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
-               rx_bi = &rx_ring->rx_bi[i];
-               if (rx_bi->dma) {
-                       dma_unmap_single(dev,
-                                        rx_bi->dma,
-                                        rx_ring->rx_buf_len,
-                                        DMA_FROM_DEVICE);
-                       rx_bi->dma = 0;
-               }
+               struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
                if (rx_bi->skb) {
                        dev_kfree_skb(rx_bi->skb);
                        rx_bi->skb = NULL;
                }
                if (rx_bi->skb) {
                        dev_kfree_skb(rx_bi->skb);
                        rx_bi->skb = NULL;
                }
-               if (rx_bi->page) {
-                       if (rx_bi->page_dma) {
-                               dma_unmap_page(dev,
-                                              rx_bi->page_dma,
-                                              PAGE_SIZE / 2,
-                                              DMA_FROM_DEVICE);
-                               rx_bi->page_dma = 0;
-                       }
-                       __free_page(rx_bi->page);
-                       rx_bi->page = NULL;
-                       rx_bi->page_offset = 0;
-               }
+               if (!rx_bi->page)
+                       continue;
+
+               dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+               __free_pages(rx_bi->page, 0);
+
+               rx_bi->page = NULL;
+               rx_bi->page_offset = 0;
        }
 
        bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
        }
 
        bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -1088,6 +1099,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
 
        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
 
+       rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 }
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 }
@@ -1111,36 +1123,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
        }
 }
 
        }
 }
 
-/**
- * i40e_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
-       struct device *dev = rx_ring->dev;
-       struct i40e_rx_buffer *rx_bi;
-       dma_addr_t dma;
-       void *buffer;
-       int buf_size;
-       int i;
-
-       if (rx_ring->rx_bi[0].hdr_buf)
-               return;
-       /* Make sure the buffers don't cross cache line boundaries. */
-       buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
-       buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
-                                   &dma, GFP_KERNEL);
-       if (!buffer)
-               return;
-       for (i = 0; i < rx_ring->count; i++) {
-               rx_bi = &rx_ring->rx_bi[i];
-               rx_bi->dma = dma + (i * buf_size);
-               rx_bi->hdr_buf = buffer + (i * buf_size);
-       }
-}
 /**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 /**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
@@ -1164,9 +1146,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 #endif /* HAVE_NDO_GET_STATS64 */
 
        /* Round up to nearest 4K */
 #endif /* HAVE_NDO_GET_STATS64 */
 
        /* Round up to nearest 4K */
-       rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
-               ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
-               : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+       rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
@@ -1177,6 +1157,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
                goto err;
        }
 
                goto err;
        }
 
+       rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
@@ -1195,6 +1176,10 @@ err:
 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 {
        rx_ring->next_to_use = val;
 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 {
        rx_ring->next_to_use = val;
+
+       /* update next to alloc since we have filled the ring */
+       rx_ring->next_to_alloc = val;
+
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -1205,125 +1190,49 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 }
 
 /**
 }
 
 /**
- * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
+ *
+ * Returns true if the page was successfully allocated or
+ * reused.
  **/
  **/
-void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+                                  struct i40e_rx_buffer *bi)
 {
 {
-       u16 i = rx_ring->next_to_use;
-       union i40e_rx_desc *rx_desc;
-       struct i40e_rx_buffer *bi;
-
-       /* do nothing if no valid netdev defined */
-       if (!rx_ring->netdev || !cleaned_count)
-               return;
-
-       while (cleaned_count--) {
-               rx_desc = I40E_RX_DESC(rx_ring, i);
-               bi = &rx_ring->rx_bi[i];
-
-               if (bi->skb) /* desc is in use */
-                       goto no_buffers;
-               if (!bi->page) {
-                       bi->page = alloc_page(GFP_ATOMIC);
-                       if (!bi->page) {
-                               rx_ring->rx_stats.alloc_page_failed++;
-                               goto no_buffers;
-                       }
-               }
-
-               if (!bi->page_dma) {
-                       /* use a half page if we're re-using */
-                       bi->page_offset ^= PAGE_SIZE / 2;
-                       bi->page_dma = dma_map_page(rx_ring->dev,
-                                                   bi->page,
-                                                   bi->page_offset,
-                                                   PAGE_SIZE / 2,
-                                                   DMA_FROM_DEVICE);
-                       if (dma_mapping_error(rx_ring->dev,
-                                             bi->page_dma)) {
-                               rx_ring->rx_stats.alloc_page_failed++;
-                               bi->page_dma = 0;
-                               goto no_buffers;
-                       }
-               }
+       struct page *page = bi->page;
+       dma_addr_t dma;
 
 
-               dma_sync_single_range_for_device(rx_ring->dev,
-                                                bi->dma,
-                                                0,
-                                                rx_ring->rx_hdr_len,
-                                                DMA_FROM_DEVICE);
-               /* Refresh the desc even if buffer_addrs didn't change
-                * because each write-back erases this info.
-                */
-               rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
-               rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-               i++;
-               if (i == rx_ring->count)
-                       i = 0;
+       /* since we are recycling buffers we should seldom need to alloc */
+       if (likely(page)) {
+               rx_ring->rx_stats.page_reuse_count++;
+               return true;
        }
 
        }
 
-no_buffers:
-       if (rx_ring->next_to_use != i)
-               i40e_release_rx_desc(rx_ring, i);
-}
-
-/**
- * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
- **/
-void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
-{
-       u16 i = rx_ring->next_to_use;
-       union i40e_rx_desc *rx_desc;
-       struct i40e_rx_buffer *bi;
-       struct sk_buff *skb;
-
-       /* do nothing if no valid netdev defined */
-       if (!rx_ring->netdev || !cleaned_count)
-               return;
-
-       while (cleaned_count--) {
-               rx_desc = I40E_RX_DESC(rx_ring, i);
-               bi = &rx_ring->rx_bi[i];
-               skb = bi->skb;
-
-               if (!skb) {
-                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                       rx_ring->rx_buf_len);
-                       if (!skb) {
-                               rx_ring->rx_stats.alloc_buff_failed++;
-                               goto no_buffers;
-                       }
-                       /* initialize queue mapping */
-                       skb_record_rx_queue(skb, rx_ring->queue_index);
-                       bi->skb = skb;
-               }
+       /* alloc new page for storage */
+       page = dev_alloc_page();
+       if (unlikely(!page)) {
+               rx_ring->rx_stats.alloc_page_failed++;
+               return false;
+       }
 
 
-               if (!bi->dma) {
-                       bi->dma = dma_map_single(rx_ring->dev,
-                                                skb->data,
-                                                rx_ring->rx_buf_len,
-                                                DMA_FROM_DEVICE);
-                       if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-                               rx_ring->rx_stats.alloc_buff_failed++;
-                               bi->dma = 0;
-                               goto no_buffers;
-                       }
-               }
+       /* map page for use */
+       dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 
 
-               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-               rx_desc->read.hdr_addr = 0;
-               i++;
-               if (i == rx_ring->count)
-                       i = 0;
+       /* if mapping failed free memory back to system since
+        * there isn't much point in holding memory we can't use
+        */
+       if (dma_mapping_error(rx_ring->dev, dma)) {
+               __free_pages(page, 0);
+               rx_ring->rx_stats.alloc_page_failed++;
+               return false;
        }
 
        }
 
-no_buffers:
-       if (rx_ring->next_to_use != i)
-               i40e_release_rx_desc(rx_ring, i);
+       bi->dma = dma;
+       bi->page = page;
+       bi->page_offset = 0;
+
+       return true;
 }
 
 /**
 }
 
 /**
@@ -1336,97 +1245,190 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
                             struct sk_buff *skb, u16 vlan_tag)
 {
        struct i40e_q_vector *q_vector = rx_ring->q_vector;
                             struct sk_buff *skb, u16 vlan_tag)
 {
        struct i40e_q_vector *q_vector = rx_ring->q_vector;
+#ifdef HAVE_VLAN_RX_REGISTER
        struct i40e_vsi *vsi = rx_ring->vsi;
        struct i40e_vsi *vsi = rx_ring->vsi;
-       u64 flags = vsi->back->flags;
+#endif
+       u64 flags = rx_ring->vsi->back->flags;
 
 #ifdef HAVE_VLAN_RX_REGISTER
        if (vlan_tag & VLAN_VID_MASK) {
                if (!vsi->vlgrp)
                        dev_kfree_skb_any(skb);
 
 #ifdef HAVE_VLAN_RX_REGISTER
        if (vlan_tag & VLAN_VID_MASK) {
                if (!vsi->vlgrp)
                        dev_kfree_skb_any(skb);
-               else if (i40e_qv_busy_polling(q_vector))
-                       netif_receive_skb(skb);
                else if (flags & I40E_FLAG_IN_NETPOLL)
                        vlan_hwaccel_rx(skb, vsi->vlgrp, vlan_tag);
                else
                        vlan_gro_receive(&q_vector->napi, vsi->vlgrp,
                                         vlan_tag, skb);
        } else {
                else if (flags & I40E_FLAG_IN_NETPOLL)
                        vlan_hwaccel_rx(skb, vsi->vlgrp, vlan_tag);
                else
                        vlan_gro_receive(&q_vector->napi, vsi->vlgrp,
                                         vlan_tag, skb);
        } else {
-               if (i40e_qv_busy_polling(q_vector))
-                       netif_receive_skb(skb);
-               else if (flags & I40E_FLAG_IN_NETPOLL)
+               if (flags & I40E_FLAG_IN_NETPOLL)
                        netif_rx(skb);
                else
                        napi_gro_receive(&q_vector->napi, skb);
        }
 #else /* HAVE_VLAN_RX_REGISTER */
                        netif_rx(skb);
                else
                        napi_gro_receive(&q_vector->napi, skb);
        }
 #else /* HAVE_VLAN_RX_REGISTER */
-       if (vlan_tag & VLAN_VID_MASK)
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+       if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+           (vlan_tag & VLAN_VID_MASK))
+#else
+       if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_RX) &&
+           (vlan_tag & VLAN_VID_MASK))
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
-       if (i40e_qv_busy_polling(q_vector))
-               netif_receive_skb(skb);
-       else if (flags & I40E_FLAG_IN_NETPOLL)
+       if (flags & I40E_FLAG_IN_NETPOLL)
                netif_rx(skb);
        else
                napi_gro_receive(&q_vector->napi, skb);
 #endif /* HAVE_VLAN_RX_REGISTER */
 }
 
                netif_rx(skb);
        else
                napi_gro_receive(&q_vector->napi, skb);
 #endif /* HAVE_VLAN_RX_REGISTER */
 }
 
-#ifdef HAVE_VXLAN_RX_OFFLOAD
 /**
 /**
- * i40e_set_transport_header - adjust skb transport header for VXLAN traffic
- * @skb: the skb to be adjusted
+ * i40e_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ *
+ * Returns false if all allocations were successful, true if any fail
  **/
  **/
-static inline void i40e_set_transport_header(struct sk_buff *skb)
+bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
 {
-       unsigned int vlan_header;
+       u16 ntu = rx_ring->next_to_use;
+       union i40e_rx_desc *rx_desc;
+       struct i40e_rx_buffer *bi;
 
 
-       /* Add 4 bytes for VLAN tagged packets */
-       if (skb->protocol == htons(ETH_P_8021Q) ||
-           skb->protocol == htons(ETH_P_8021AD))
-               vlan_header = VLAN_HLEN;
-       else
-               vlan_header = 0;
+       /* do nothing if no valid netdev defined */
+       if (!rx_ring->netdev || !cleaned_count)
+               return false;
+
+       rx_desc = I40E_RX_DESC(rx_ring, ntu);
+       bi = &rx_ring->rx_bi[ntu];
+
+       do {
+               if (!i40e_alloc_mapped_page(rx_ring, bi))
+                       goto no_buffers;
+
+               /* Refresh the desc even if buffer_addrs didn't change
+                * because each write-back erases this info.
+                */
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+               rx_desc->read.hdr_addr = 0;
+
+               rx_desc++;
+               bi++;
+               ntu++;
+               if (unlikely(ntu == rx_ring->count)) {
+                       rx_desc = I40E_RX_DESC(rx_ring, 0);
+                       bi = rx_ring->rx_bi;
+                       ntu = 0;
+               }
+
+               /* clear the status bits for the next_to_use descriptor */
+               rx_desc->wb.qword1.status_error_len = 0;
+
+               cleaned_count--;
+       } while (cleaned_count);
+
+       if (rx_ring->next_to_use != ntu)
+               i40e_release_rx_desc(rx_ring, ntu);
+
+       return false;
+
+no_buffers:
+       if (rx_ring->next_to_use != ntu)
+               i40e_release_rx_desc(rx_ring, ntu);
+
+       /* make sure to come back via polling to try again after
+        * allocation failure
+        */
+       return true;
+}
+
+#ifdef I40E_ADD_PROBES
+static void i40e_rx_extra_counters(struct i40e_vsi *vsi, u32 rx_error,
+                                  const struct i40e_rx_ptype_decoded decoded)
+{
+       bool ipv4;
+
+       ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+              (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
+
+       if (ipv4 &&
+           (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+                        BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+               vsi->back->rx_ip4_cso_err++;
+
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) {
+               if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
+                       vsi->back->rx_tcp_cso_err++;
+               else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
+                       vsi->back->rx_udp_cso_err++;
+               else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)
+                       vsi->back->rx_sctp_cso_err++;
+       }
 
 
-       /* set header to L3 of FC */
-       skb_set_transport_header(skb, (skb_mac_header(skb) - skb->data) +
-                                sizeof(struct ethhdr) +
-                                vlan_header + ip_hdr(skb)->ihl * 4);
+       if ((decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+           (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4))
+               vsi->back->rx_ip4_cso++;
+       if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
+               vsi->back->rx_tcp_cso++;
+       else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
+               vsi->back->rx_udp_cso++;
+       else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)
+               vsi->back->rx_sctp_cso++;
 }
 
 }
 
-#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#endif /* I40E_ADD_PROBES */
+#if defined(HAVE_VXLAN_RX_OFFLOAD) || defined(HAVE_GENEVE_RX_OFFLOAD)
+#if defined(HAVE_SKBUFF_CSUM_LEVEL) || defined(ESX55)
+#define I40E_TUNNEL_SUPPORT
+#endif
+#endif
 /**
  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
 /**
  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    struct sk_buff *skb,
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    struct sk_buff *skb,
-                                   u32 rx_status,
-                                   u32 rx_error,
-                                   u16 rx_ptype)
+                                   union i40e_rx_desc *rx_desc)
 {
 {
-       struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
-       bool ipv4 = false, ipv6 = false;
-#ifdef HAVE_VXLAN_RX_OFFLOAD
-       bool ipv4_tunnel, ipv6_tunnel;
-       __wsum rx_udp_csum;
-       struct iphdr *iph;
-       __sum16 csum;
-
-       ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-                     (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-       ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-                     (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
-#ifndef HAVE_SKBUFF_CSUM_LEVEL
-       skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
+       struct i40e_rx_ptype_decoded decoded;
+       bool ipv4, ipv6;
+       u32 rx_error, rx_status;
+#ifdef I40E_TUNNEL_SUPPORT
+       bool tunnel = false;
+#endif
+       u8 ptype;
+       u64 qword;
+
+       qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+       rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+                  I40E_RXD_QW1_ERROR_SHIFT;
+       rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                   I40E_RXD_QW1_STATUS_SHIFT;
+       decoded = decode_rx_desc_ptype(ptype);
+
+#ifdef I40E_TUNNEL_SUPPORT
+       if (decoded.known &&
+           ((decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP) ||
+            (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP) ||
+            (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)))
+               tunnel = true;
+       else
+               tunnel = false;
+#ifdef HAVE_SKBUFF_CSUM_LEVEL
+
+       skb->encapsulation = tunnel ? 1 : 0;
 #endif
 #endif
-#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#endif /* I40E_TUNNEL_SUPPORT */
 
        skb->ip_summed = CHECKSUM_NONE;
 
 
        skb->ip_summed = CHECKSUM_NONE;
 
+       skb_checksum_none_assert(skb);
+
        /* Rx csum enabled and ip headers found? */
 #ifdef HAVE_NDO_SET_FEATURES
        if (!(vsi->netdev->features & NETIF_F_RXCSUM))
        /* Rx csum enabled and ip headers found? */
 #ifdef HAVE_NDO_SET_FEATURES
        if (!(vsi->netdev->features & NETIF_F_RXCSUM))
@@ -1444,24 +1446,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        if (!(decoded.known && decoded.outer_ip))
                return;
 
        if (!(decoded.known && decoded.outer_ip))
                return;
 
-       if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-           decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
-               ipv4 = true;
-       else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
-                decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
-               ipv6 = true;
+       ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+              (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
+       ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+              (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
+
+#ifdef I40E_ADD_PROBES
+       i40e_rx_extra_counters(vsi, rx_error, decoded);
 
 
+#endif /* I40E_ADD_PROBES */
        if (ipv4 &&
            (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
                         BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
        if (ipv4 &&
            (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
                         BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
-#ifdef I40E_ADD_PROBES
-       {
-               vsi->back->rx_ip4_cso_err++;
-               goto checksum_fail;
-       }
-#else
                goto checksum_fail;
                goto checksum_fail;
-#endif
 
        /* likely incorrect csum if alternate IP extension headers found */
        if (ipv6 &&
 
        /* likely incorrect csum if alternate IP extension headers found */
        if (ipv6 &&
@@ -1469,16 +1466,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                /* don't increment checksum err here, non-fatal err */
                return;
 
                /* don't increment checksum err here, non-fatal err */
                return;
 
-#ifdef I40E_ADD_PROBES
-       if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) {
-               if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
-                       vsi->back->rx_tcp_cso_err++;
-               else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
-                       vsi->back->rx_udp_cso_err++;
-               else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)
-                       vsi->back->rx_sctp_cso_err++;
-       }
-#endif
        /* there was some L4 error, count error and punt packet to the stack */
        if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
                goto checksum_fail;
        /* there was some L4 error, count error and punt packet to the stack */
        if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
                goto checksum_fail;
@@ -1490,79 +1477,31 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
        if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
-#ifdef HAVE_VXLAN_RX_OFFLOAD
-       /* If VXLAN traffic has an outer UDPv4 checksum we need to check
-        * it in the driver, hardware does not do it for us.
-        * Since L3L4P bit was set we assume a valid IHL value (>=5)
-        * so the total length of IPv4 header is IHL*4 bytes
-        * The UDP_0 bit *may* bet set if the *inner* header is UDP
+       /* The hardware supported by this driver does not validate outer
+        * checksums for tunneled VXLAN or GENEVE frames.  I don't agree
+        * with it but the specification states that you "MAY validate", it
+        * doesn't make it a hard requirement so if we have validated the
+        * inner checksum report CHECKSUM_UNNECESSARY.
         */
         */
-       if (ipv4_tunnel) {
-               i40e_set_transport_header(skb);
-               if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
-                   (udp_hdr(skb)->check != 0)) {
-                       rx_udp_csum = udp_csum(skb);
-                       iph = ip_hdr(skb);
-                       csum = csum_tcpudp_magic(
-                                       iph->saddr, iph->daddr,
-                                       (skb->len - skb_transport_offset(skb)),
-                                       IPPROTO_UDP, rx_udp_csum);
-
-                       if (udp_hdr(skb)->check != csum)
-                               goto checksum_fail;
-
-               } /* else its GRE and so no outer UDP header */
-       }
-#endif /* HAVE_VXLAN_RX_OFFLOAD */
-#ifdef I40E_ADD_PROBES
-       if ((decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
-           (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4))
-               vsi->back->rx_ip4_cso++;
-       if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
-               vsi->back->rx_tcp_cso++;
-       else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
-               vsi->back->rx_udp_cso++;
-       else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)
-               vsi->back->rx_sctp_cso++;
-#endif
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
 #ifdef HAVE_SKBUFF_CSUM_LEVEL
 #ifdef HAVE_SKBUFF_CSUM_LEVEL
-       skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+       skb->csum_level = tunnel ? 1 : 0;
 #endif
 #endif
+
        return;
 
 checksum_fail:
        vsi->back->hw_csum_rx_error++;
 }
 
        return;
 
 checksum_fail:
        vsi->back->hw_csum_rx_error++;
 }
 
-#ifdef NETIF_F_RXHASH
 /**
 /**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
-                              union i40e_rx_desc *rx_desc)
-{
-       const __le64 rss_mask =
-               cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-                           I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-       if ((ring->netdev->features & NETIF_F_RXHASH) &&
-           (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
-               return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-       else
-               return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
  * @ptype: the ptype value from the descriptor
  *
  * Returns a hash type to be used by skb_set_hash
  **/
  * @ptype: the ptype value from the descriptor
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline int i40e_ptype_to_hash(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
 {
        struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
 {
        struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -1579,323 +1518,454 @@ static inline int i40e_ptype_to_hash(u8 ptype)
                return PKT_HASH_TYPE_L2;
 }
 
                return PKT_HASH_TYPE_L2;
 }
 
+/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+                               union i40e_rx_desc *rx_desc,
+                               struct sk_buff *skb,
+                               u8 rx_ptype)
+{
+#ifdef NETIF_F_RXHASH
+       u32 hash;
+       const __le64 rss_mask =
+               cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+                           I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+       if (ring->netdev->features & NETIF_F_RXHASH)
+               return;
+
+       if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+               hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+               skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+       }
 #endif /* NETIF_F_RXHASH */
 #endif /* NETIF_F_RXHASH */
+}
+
 /**
 /**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring:  rx ring to clean
- * @budget:   how many cleans we're allowed
+ * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
  *
  *
- * Returns number of packets cleaned
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
  **/
  **/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
+static inline
+void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+                            union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+                            u8 rx_ptype)
 {
 {
-       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-       u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
-       u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       const int current_node = numa_node_id();
-       struct i40e_vsi *vsi = rx_ring->vsi;
-       u16 i = rx_ring->next_to_clean;
-       union i40e_rx_desc *rx_desc;
-       u32 rx_error, rx_status;
-       u8 rx_ptype;
-       u64 qword;
+#ifdef HAVE_PTP_1588_CLOCK
+       u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                       I40E_RXD_QW1_STATUS_SHIFT;
+       u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+                  I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
 
 
-       do {
-               struct i40e_rx_buffer *rx_bi;
-               struct sk_buff *skb;
-               u16 vlan_tag;
-               /* return some buffers to hardware, one at a time is too slow */
-               if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-                       i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
-                       cleaned_count = 0;
-               }
+       if (unlikely(rsyn)) {
+               i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
+               rx_ring->last_rx_timestamp = jiffies;
+       }
+#endif /* HAVE_PTP_1588_CLOCK */
 
 
-               i = rx_ring->next_to_clean;
-               rx_desc = I40E_RX_DESC(rx_ring, i);
-               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-               rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-                       I40E_RXD_QW1_STATUS_SHIFT;
+       i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 
 
-               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
-                       break;
+       /* modifies the skb - consumes the enet header */
+       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
 
-               /* This memory barrier is needed to keep us from reading
-                * any other fields out of the rx_desc until we know the
-                * DD bit is set.
-                */
-               rmb();
-               if (i40e_rx_is_programming_status(qword)) {
-                       i40e_clean_programming_status(rx_ring, rx_desc);
-                       I40E_RX_INCREMENT(rx_ring, i);
-                       continue;
-               }
-               rx_bi = &rx_ring->rx_bi[i];
-               skb = rx_bi->skb;
-               if (likely(!skb)) {
-                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                                       rx_ring->rx_hdr_len);
-                       if (!skb) {
-                               rx_ring->rx_stats.alloc_buff_failed++;
-                               break;
-                       }
+       i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
+}
 
 
-                       /* initialize queue mapping */
-                       skb_record_rx_queue(skb, rx_ring->queue_index);
-                       /* we are reusing so sync this buffer for CPU use */
-                       dma_sync_single_range_for_cpu(rx_ring->dev,
-                                                     rx_bi->dma,
-                                                     0,
-                                                     rx_ring->rx_hdr_len,
-                                                     DMA_FROM_DEVICE);
-               }
-               rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-                               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-               rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
-                               I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
-               rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
-                        I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
-               rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-                          I40E_RXD_QW1_ERROR_SHIFT;
-               rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
-               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       unsigned char *va;
+       unsigned int pull_len;
 
 
-               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-                          I40E_RXD_QW1_PTYPE_SHIFT;
-               prefetch(rx_bi->page);
-               rx_bi->skb = NULL;
-               cleaned_count++;
-               if (rx_hbo || rx_sph) {
-                       int len;
+       /* it is valid to use page_address instead of kmap since we are
+        * working with pages allocated out of the lomem pool per
+        * alloc_page(GFP_ATOMIC)
+        */
+       va = skb_frag_address(frag);
 
 
-                       if (rx_hbo)
-                               len = I40E_RX_HDR_SIZE;
-                       else
-                               len = rx_header_len;
-                       memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
-               } else if (skb->len == 0) {
-                       int len;
-
-                       len = (rx_packet_len > skb_headlen(skb) ?
-                               skb_headlen(skb) : rx_packet_len);
-                       memcpy(__skb_put(skb, len),
-                              rx_bi->page + rx_bi->page_offset,
-                              len);
-                       rx_bi->page_offset += len;
-                       rx_packet_len -= len;
-               }
+       /* we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes if the skb->len is less than 60 for skb_pad.
+        */
+       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
 
 
-               /* Get the rest of the data if this was a header split */
-               if (rx_packet_len) {
-                       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                                          rx_bi->page,
-                                          rx_bi->page_offset,
-                                          rx_packet_len);
+       /* align pull length to size of long to optimize memcpy performance */
+       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
 
 
-                       skb->len += rx_packet_len;
-                       skb->data_len += rx_packet_len;
-                       skb->truesize += rx_packet_len;
+       /* update all of the pointers */
+       skb_frag_size_sub(frag, pull_len);
+       frag->page_offset += pull_len;
+       skb->data_len -= pull_len;
+       skb->tail += pull_len;
+}
 
 
-                       if ((page_count(rx_bi->page) == 1) &&
-                           (page_to_nid(rx_bi->page) == current_node))
-                               get_page(rx_bi->page);
-                       else
-                               rx_bi->page = NULL;
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+       /* place header in linear portion of buffer */
+       if (skb_is_nonlinear(skb))
+               i40e_pull_tail(rx_ring, skb);
 
 
-                       dma_unmap_page(rx_ring->dev,
-                                      rx_bi->page_dma,
-                                      PAGE_SIZE / 2,
-                                      DMA_FROM_DEVICE);
-                       rx_bi->page_dma = 0;
-               }
-               I40E_RX_INCREMENT(rx_ring, i);
+       /* if eth_skb_pad returns an error the skb was freed */
+       if (eth_skb_pad(skb))
+               return true;
 
 
-               if (unlikely(
-                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
-                       struct i40e_rx_buffer *next_buffer;
+       return false;
+}
 
 
-                       next_buffer = &rx_ring->rx_bi[i];
-                       next_buffer->skb = skb;
-                       rx_ring->rx_stats.non_eop_descs++;
-                       continue;
-               }
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+                              struct i40e_rx_buffer *old_buff)
+{
+       struct i40e_rx_buffer *new_buff;
+       u16 nta = rx_ring->next_to_alloc;
 
 
-               /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
+       new_buff = &rx_ring->rx_bi[nta];
 
 
-#ifdef NETIF_F_RXHASH
-               skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-                            i40e_ptype_to_hash(rx_ptype));
+       /* update, and store next to alloc */
+       nta++;
+       rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+       /* transfer page from old buffer to new buffer */
+       *new_buff = *old_buff;
+}
+
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+                            struct i40e_rx_buffer *rx_buffer,
+                            union i40e_rx_desc *rx_desc,
+                            struct sk_buff *skb)
+{
+       struct page *page = rx_buffer->page;
+       u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+                           I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
 #endif
 #endif
-#ifdef HAVE_PTP_1588_CLOCK
-               if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
-                       i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
-                                          I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
-                                          I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
-                       rx_ring->last_rx_timestamp = jiffies;
-               }
 
 
-#endif /* HAVE_PTP_1588_CLOCK */
-               /* probably a little skewed due to removing CRC */
-               total_rx_bytes += skb->len;
-               total_rx_packets++;
+       /* will the data fit in the skb we allocated? if so, just
+        * copy it as it is pretty small anyway
+        */
+       if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+               unsigned char *va = page_address(page) + rx_buffer->page_offset;
 
 
-               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 
-               i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+               /* page is not reserved, we can reuse buffer as-is */
+               if (likely(!i40e_page_is_reserved(page)))
+                       return true;
 
 
-               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
-                        ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
-                        : 0;
-#ifdef I40E_FCOE
-               if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
+               /* this page cannot be reused so discard it */
+               __free_pages(page, 0);
+               return false;
+       }
+
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                       rx_buffer->page_offset, size, truesize);
+
+       /* avoid re-using remote pages */
+       if (unlikely(i40e_page_is_reserved(page)))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are only owner of page we can reuse it */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= truesize;
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > last_offset)
+               return false;
 #endif
 #endif
-               skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
-               i40e_receive_skb(rx_ring, skb, vlan_tag);
 
 
-               rx_desc->wb.qword1.status_error_len = 0;
+       /* Even if we own the page, we are not allowed to use atomic_set()
+        * This would break get_page_unless_zero() users.
+        */
+       get_page(rx_buffer->page);
 
 
-       } while (likely(total_rx_packets < budget));
+       return true;
+}
 
 
-       u64_stats_update_begin(&rx_ring->syncp);
-       rx_ring->stats.packets += total_rx_packets;
-       rx_ring->stats.bytes += total_rx_bytes;
-       u64_stats_update_end(&rx_ring->syncp);
-       rx_ring->q_vector->rx.total_packets += total_rx_packets;
-       rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+/**
+ * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
+ *
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ */
+static inline
+struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
+                                    union i40e_rx_desc *rx_desc)
+{
+       struct i40e_rx_buffer *rx_buffer;
+       struct sk_buff *skb;
+       struct page *page;
 
 
-       return total_rx_packets;
+       rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+       page = rx_buffer->page;
+       prefetchw(page);
+
+       skb = rx_buffer->skb;
+
+       if (likely(!skb)) {
+               void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+               /* prefetch first cache line of first page */
+               prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+               prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+               /* allocate a skb to store the frags */
+               skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+                                      I40E_RX_HDR_SIZE,
+                                      GFP_ATOMIC | __GFP_NOWARN);
+               if (unlikely(!skb)) {
+                       rx_ring->rx_stats.alloc_buff_failed++;
+                       return NULL;
+               }
+
+               /* we will be copying header into skb->data in
+                * pskb_may_pull so it is in our interest to prefetch
+                * it now to avoid a possible cache miss
+                */
+               prefetchw(skb->data);
+
+               skb_record_rx_queue(skb, rx_ring->queue_index);
+       } else {
+               /* we are reusing so sync this buffer for CPU use */
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             rx_buffer->dma,
+                                             rx_buffer->page_offset,
+                                             I40E_RXBUFFER_2048,
+                                             DMA_FROM_DEVICE);
+
+               rx_buffer->skb = NULL;
+       }
+
+       /* pull page into skb */
+       if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+               /* hand second half of page back to the ring */
+               i40e_reuse_rx_page(rx_ring, rx_buffer);
+               rx_ring->rx_stats.page_reuse_count++;
+       } else {
+               /* we are not reusing the buffer so unmap it */
+               dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+                              DMA_FROM_DEVICE);
+       }
+
+       /* clear contents of buffer_info */
+       rx_buffer->page = NULL;
+
+       return skb;
 }
 
 /**
 }
 
 /**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring:  rx ring to clean
- * @budget:   how many cleans we're allowed
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
  *
  *
- * Returns number of packets cleaned
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
  **/
  **/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+                           union i40e_rx_desc *rx_desc,
+                           struct sk_buff *skb)
+{
+       u32 ntc = rx_ring->next_to_clean + 1;
+
+       /* fetch, update, and store next to clean */
+       ntc = (ntc < rx_ring->count) ? ntc : 0;
+       rx_ring->next_to_clean = ntc;
+
+       prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+#define staterrlen rx_desc->wb.qword1.status_error_len
+       if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
+               i40e_clean_programming_status(rx_ring, rx_desc);
+               rx_ring->rx_bi[ntc].skb = skb;
+               return true;
+       }
+       /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+       if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+               return false;
+
+       /* place skb in next buffer to be received */
+       rx_ring->rx_bi[ntc].skb = skb;
+       rx_ring->rx_stats.non_eop_descs++;
+
+       return true;
+}
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       struct i40e_vsi *vsi = rx_ring->vsi;
-       u16 i;
-       union i40e_rx_desc *rx_desc;
-       u32 rx_error, rx_status;
-       u16 rx_packet_len;
-       u8 rx_ptype;
-       u64 qword;
+       bool failure = false;
 
 
-       do {
-               struct i40e_rx_buffer *rx_bi;
+       while (likely(total_rx_packets < (unsigned int)budget)) {
+               union i40e_rx_desc *rx_desc;
                struct sk_buff *skb;
                struct sk_buff *skb;
+               u32 rx_status;
                u16 vlan_tag;
                u16 vlan_tag;
+               u8 rx_ptype;
+               u64 qword;
+
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-                       i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
+                       failure = failure ||
+                                 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }
 
                        cleaned_count = 0;
                }
 
-               i = rx_ring->next_to_clean;
-               rx_desc = I40E_RX_DESC(rx_ring, i);
+               rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
                qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
                qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+                          I40E_RXD_QW1_PTYPE_SHIFT;
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-                       I40E_RXD_QW1_STATUS_SHIFT;
+                           I40E_RXD_QW1_STATUS_SHIFT;
 
                if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
 
                if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
+               /* status_error_len will always be zero for unused descriptors
+                * because it's cleared in cleanup, and overlaps with hdr_addr
+                * which is always zero because packet split isn't used, if the
+                * hardware wrote DD then it will be non-zero
+                */
+               if (!rx_desc->wb.qword1.status_error_len)
+                       break;
+
                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * DD bit is set.
                 */
                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * DD bit is set.
                 */
-               rmb();
+               dma_rmb();
 
 
-               if (i40e_rx_is_programming_status(qword)) {
-                       i40e_clean_programming_status(rx_ring, rx_desc);
-                       I40E_RX_INCREMENT(rx_ring, i);
-                       continue;
-               }
-               rx_bi = &rx_ring->rx_bi[i];
-               skb = rx_bi->skb;
-               prefetch(skb->data);
-
-               rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-                               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
-               rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-                          I40E_RXD_QW1_ERROR_SHIFT;
-               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+               skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
+               if (!skb)
+                       break;
 
 
-               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-                          I40E_RXD_QW1_PTYPE_SHIFT;
-               rx_bi->skb = NULL;
                cleaned_count++;
 
                cleaned_count++;
 
-               /* Get the header and possibly the whole packet
-                * If this is an skb from previous receive dma will be 0
-                */
-               skb_put(skb, rx_packet_len);
-               dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
-                                DMA_FROM_DEVICE);
-               rx_bi->dma = 0;
-
-               I40E_RX_INCREMENT(rx_ring, i);
-
-               if (unlikely(
-                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
-                       rx_ring->rx_stats.non_eop_descs++;
+               if (i40e_is_non_eop(rx_ring, rx_desc, skb))
                        continue;
                        continue;
-               }
 
 
-               /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
-                       dev_kfree_skb_any(skb);
+               if (i40e_cleanup_headers(rx_ring, skb))
                        continue;
                        continue;
-               }
 
 
-#ifdef NETIF_F_RXHASH
-               skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-                            i40e_ptype_to_hash(rx_ptype));
-#endif
-#ifdef HAVE_PTP_1588_CLOCK
-               if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
-                       i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
-                                          I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
-                                          I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
-                       rx_ring->last_rx_timestamp = jiffies;
-               }
-
-#endif /* HAVE_PTP_1588_CLOCK */
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
-               total_rx_packets++;
 
 
-               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+               /* populate checksum, VLAN, and protocol */
+               i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 
 
-               i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
-
-               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
-                        ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
-                        : 0;
 #ifdef I40E_FCOE
 #ifdef I40E_FCOE
-               if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+               if (unlikely(
+                   i40e_rx_is_fcoe(rx_ptype) &&
+                   !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
                        dev_kfree_skb_any(skb);
                        continue;
                }
 #endif
                        dev_kfree_skb_any(skb);
                        continue;
                }
 #endif
-               skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
-               i40e_receive_skb(rx_ring, skb, vlan_tag);
+               vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+                          le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
 
 
-               rx_desc->wb.qword1.status_error_len = 0;
-       } while (likely(total_rx_packets < budget));
+               i40e_receive_skb(rx_ring, skb, vlan_tag);
 
 
+               /* update budget accounting */
+               total_rx_packets++;
+       }
 
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
 
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
@@ -1904,7 +1974,8 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-       return total_rx_packets;
+       /* guarantee a trip back through this routine if there was a failure */
+       return failure ? budget : (int)total_rx_packets;
 }
 
 static u32 i40e_buildreg_itr(const int type, const u16 itr)
 }
 
 static u32 i40e_buildreg_itr(const int type, const u16 itr)
@@ -1912,7 +1983,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
        u32 val;
 
        val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
        u32 val;
 
        val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+             /* Don't clear PBA because that can cause lost interrupts that
+              * came in while we were cleaning/polling
+              */
              (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
              (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
 
              (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
              (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
 
@@ -1938,7 +2011,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
 
        vector = (q_vector->v_idx + vsi->base_vector);
 
 
        vector = (q_vector->v_idx + vsi->base_vector);
 
-       /* avoid dynamic calulation if in countdown mode OR if
+       /* avoid dynamic calculation if in countdown mode OR if
         * all dynamic is disabled
         */
        rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
         * all dynamic is disabled
         */
        rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
@@ -1962,7 +2035,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
        if (rx || tx) {
                /* get the higher of the two ITR adjustments and
                 * use the same value for both ITR registers
        if (rx || tx) {
                /* get the higher of the two ITR adjustments and
                 * use the same value for both ITR registers
-                * when in adaptive mode (rx and/or tx)
+                * when in adaptive mode (Rx and/or Tx)
                 */
                u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
 
                 */
                u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
 
@@ -1994,7 +2067,6 @@ enable_int:
                q_vector->itr_countdown--;
        else
                q_vector->itr_countdown = ITR_COUNTDOWN_START;
                q_vector->itr_countdown--;
        else
                q_vector->itr_countdown = ITR_COUNTDOWN_START;
-
 }
 
 /**
 }
 
 /**
@@ -2016,7 +2088,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        bool arm_wb = false;
        struct i40e_ring *ring;
        int budget_per_ring;
        bool arm_wb = false;
        struct i40e_ring *ring;
        int budget_per_ring;
-       int cleaned;
+       int work_done = 0;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
                napi_complete(napi);
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
                napi_complete(napi);
@@ -2028,17 +2100,17 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
         */
        i40e_for_each_ring(ring, q_vector->tx) {
                clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
         */
        i40e_for_each_ring(ring, q_vector->tx) {
                clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
-               arm_wb |= ring->arm_wb;
+               arm_wb = arm_wb || ring->arm_wb;
                ring->arm_wb = false;
        }
 
                ring->arm_wb = false;
        }
 
-       /* if i40e_busy_poll() has the vector or netpoll flag is set
-        * then, we skip rx
-        */
+       /* if the netpoll flag is set then we skip rx */
        if (flags & I40E_FLAG_IN_NETPOLL)
        if (flags & I40E_FLAG_IN_NETPOLL)
-               return budget;
-       else if (!i40e_qv_lock_napi(q_vector))
-               return budget;
+               goto tx_only;
+
+       /* Handle case where we are called by netpoll with a budget of 0 */
+       if (budget <= 0)
+               goto tx_only;
 
        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
 
        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
@@ -2046,17 +2118,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
        i40e_for_each_ring(ring, q_vector->rx) {
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
        i40e_for_each_ring(ring, q_vector->rx) {
+               int cleaned;
 
 
-               if (ring_is_ps_enabled(ring))
-                       cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
-               else
-                       cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+               cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
+               work_done += cleaned;
                /* if we didn't clean as many as budgeted, we must be done */
                clean_complete &= (budget_per_ring != cleaned);
        }
 
                /* if we didn't clean as many as budgeted, we must be done */
                clean_complete &= (budget_per_ring != cleaned);
        }
 
-       i40e_qv_unlock_napi(q_vector);
-
 #ifndef HAVE_NETDEV_NAPI_LIST
        /* if netdev is disabled we need to stop polling */
        if (!netif_running(vsi->netdev))
 #ifndef HAVE_NETDEV_NAPI_LIST
        /* if netdev is disabled we need to stop polling */
        if (!netif_running(vsi->netdev))
@@ -2065,42 +2134,35 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 #endif
        /* If work not completed, return budget and polling will return */
        if (!clean_complete) {
 #endif
        /* If work not completed, return budget and polling will return */
        if (!clean_complete) {
-               if (arm_wb)
-                       i40e_force_wb(vsi, q_vector);
+tx_only:
+               if (arm_wb){
+                       q_vector->tx.ring[0].tx_stats.tx_force_wb++;
+                       i40e_enable_wb_on_itr(vsi, q_vector);
+               }
                return budget;
        }
 
                return budget;
        }
 
+       if (flags & I40E_TXR_FLAGS_WB_ON_ITR)
+               q_vector->arm_wb_state = false;
+
        /* Work is done so exit the polling mode and re-enable the interrupt */
        /* Work is done so exit the polling mode and re-enable the interrupt */
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
                i40e_update_enable_itr(vsi, q_vector);
        } else { /* Legacy mode */
        if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
                i40e_update_enable_itr(vsi, q_vector);
        } else { /* Legacy mode */
-               struct i40e_hw *hw = &vsi->back->hw;
-               /* We re-enable the queue 0 cause, but
-                * don't worry about dynamic_enable
-                * because we left it on for the other
-                * possible interrupts during napi
-                */
-               u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-
-               qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-               wr32(hw, I40E_QINT_RQCTL(0), qval);
-               qval = rd32(hw, I40E_QINT_TQCTL(0));
-               qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-               wr32(hw, I40E_QINT_TQCTL(0), qval);
-               i40e_irq_dynamic_enable_icr0(vsi->back);
+               i40e_irq_dynamic_enable_icr0(vsi->back, false);
        }
        return 0;
 }
        }
        return 0;
 }
+
 /**
  * i40e_atr - Add a Flow Director ATR filter
  * @tx_ring:  ring to add programming descriptor to
  * @skb:      send buffer
  * @tx_flags: send tx flags
 /**
  * i40e_atr - Add a Flow Director ATR filter
  * @tx_ring:  ring to add programming descriptor to
  * @skb:      send buffer
  * @tx_flags: send tx flags
- * @protocol: wire protocol
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                    u32 tx_flags, __be16 protocol)
+                    u32 tx_flags)
 {
        struct i40e_filter_program_desc *fdir_desc;
        struct i40e_pf *pf = tx_ring->vsi->back;
 {
        struct i40e_filter_program_desc *fdir_desc;
        struct i40e_pf *pf = tx_ring->vsi->back;
@@ -2112,6 +2174,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        struct tcphdr *th;
        unsigned int hlen;
        u32 flex_ptype, dtype_cmd;
        struct tcphdr *th;
        unsigned int hlen;
        u32 flex_ptype, dtype_cmd;
+       int l4_proto;
        u16 i;
 
        /* make sure ATR is enabled */
        u16 i;
 
        /* make sure ATR is enabled */
@@ -2125,39 +2188,37 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (!tx_ring->atr_sample_rate)
                return;
 
        if (!tx_ring->atr_sample_rate)
                return;
 
+       /* Currently only IPv4/IPv6 with TCP is supported */
        if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
                return;
 
        if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
                return;
 
+       /* snag network header to get L4 type and address */
 #ifdef HAVE_SKB_INNER_NETWORK_HEADER
 #ifdef HAVE_SKB_INNER_NETWORK_HEADER
-       if ((tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
-               hdr.network = skb_inner_network_header(skb);
-               hlen = skb_inner_network_header_len(skb);
-       } else {
-#endif
-               /* snag network header to get L4 type and address */
-               hdr.network = skb_network_header(skb);
-
-               /* Currently only IPv4/IPv6 with TCP is supported */
-               /* access ihl as u8 to avoid unaligned access on ia64 */
-               if (tx_flags & I40E_TX_FLAGS_IPV4)
-                       hlen = (hdr.network[0] & 0x0F) << 2;
-               else if (protocol == htons(ETH_P_IPV6))
-                       hlen = sizeof(struct ipv6hdr);
-               else
-                       return;
-#ifdef HAVE_SKB_INNER_NETWORK_HEADER
-       }
-#endif
+       hdr.network = (tx_flags & I40E_TX_FLAGS_TUNNEL) ?
+                     skb_inner_network_header(skb) : skb_network_header(skb);
+#else
+       hdr.network = skb_network_header(skb);
+#endif /* HAVE_SKB_INNER_NETWORK_HEADER */
 
 
-       /* Currently only IPv4/IPv6 with TCP is supported */
        /* Note: tx_flags gets modified to reflect inner protocols in
         * tx_enable_csum function if encap is enabled.
         */
        /* Note: tx_flags gets modified to reflect inner protocols in
         * tx_enable_csum function if encap is enabled.
         */
-       if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
-           (hdr.ipv4->protocol != IPPROTO_TCP))
-               return;
-       else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
-                  (hdr.ipv6->nexthdr != IPPROTO_TCP))
+       if (tx_flags & I40E_TX_FLAGS_IPV4) {
+               /* access ihl as u8 to avoid unaligned access on ia64 */
+               hlen = (hdr.network[0] & 0x0F) << 2;
+               l4_proto = hdr.ipv4->protocol;
+       } else {
+               /* find the start of the innermost ipv6 header */
+               unsigned int inner_hlen = hdr.network - skb->data;
+               unsigned int h_offset = inner_hlen;
+
+               /* this function updates h_offset to the end of the header */
+               l4_proto = ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
+               /* hlen will contain our best estimate of the tcp header */
+               hlen = h_offset - inner_hlen;
+       }
+
+       if (l4_proto != IPPROTO_TCP)
                return;
 
        th = (struct tcphdr *)(hdr.network + hlen);
                return;
 
        th = (struct tcphdr *)(hdr.network + hlen);
@@ -2166,6 +2227,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
                return;
 
        if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
                return;
 
+       if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
+           (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
+               /* HW ATR eviction will take care of removing filters on FIN
+                * and RST packets.
+                */
+               if (th->fin || th->rst)
+                       return;
+       }
+
        tx_ring->atr_count++;
 
        /* sample on all syn/fin/rst packets or once every atr sample rate */
        tx_ring->atr_count++;
 
        /* sample on all syn/fin/rst packets or once every atr sample rate */
@@ -2186,7 +2256,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
        flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
                      I40E_TXD_FLTR_QW0_QINDEX_MASK;
 
        flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
                      I40E_TXD_FLTR_QW0_QINDEX_MASK;
-       flex_ptype |= (protocol == htons(ETH_P_IP)) ?
+       flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
                      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
                       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
                      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
                      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
                       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
                      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
@@ -2209,7 +2279,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
 
        dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
                     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
 
        dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
-       if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+       if (!(tx_flags & I40E_TX_FLAGS_TUNNEL))
                dtype_cmd |=
                        ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
                        I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
                dtype_cmd |=
                        ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
                        I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
@@ -2220,6 +2290,10 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
                        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
                        I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
                        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
+       if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
+           (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
+               dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
+
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
        fdir_desc->rsvd = cpu_to_le32(0);
        fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
        fdir_desc->rsvd = cpu_to_le32(0);
        fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
@@ -2240,7 +2314,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  **/
 #ifdef I40E_FCOE
 inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
  **/
 #ifdef I40E_FCOE
 inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                                     struct i40e_ring *tx_ring, u32 *flags)
+                                     struct i40e_ring *tx_ring,
+                                     u32 *flags)
 #else
 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                             struct i40e_ring *tx_ring,
 #else
 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                             struct i40e_ring *tx_ring,
@@ -2296,10 +2371,11 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
                if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
                        struct vlan_ethhdr *vhdr;
                                I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
                if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
                        struct vlan_ethhdr *vhdr;
+                       int rc;
 
 
-                       if (skb_header_cloned(skb) &&
-                           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-                               return -ENOMEM;
+                       rc = skb_cow_head(skb, 0);
+                       if (rc < 0)
+                               return rc;
                        vhdr = (struct vlan_ethhdr *)skb->data;
                        vhdr->h_vlan_TCI = htons(tx_flags >>
                                                 I40E_TX_FLAGS_VLAN_SHIFT);
                        vhdr = (struct vlan_ethhdr *)skb->data;
                        vhdr->h_vlan_TCI = htons(tx_flags >>
                                                 I40E_TX_FLAGS_VLAN_SHIFT);
@@ -2313,90 +2389,117 @@ out:
        return 0;
 }
 
        return 0;
 }
 
+#ifndef HAVE_ENCAP_TSO_OFFLOAD
+#define inner_ip_hdr(skb) 0
+#define inner_tcp_hdr(skb) 0
+#define inner_ipv6_hdr(skb) 0
+#define inner_tcp_hdrlen(skb) 0
+#define inner_tcp_hdrlen(skb) 0
+#define skb_inner_transport_header(skb) ((skb)->data)
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
 /**
  * i40e_tso - set up the tso context descriptor
 /**
  * i40e_tso - set up the tso context descriptor
- * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
  * @hdr_len:  ptr to the size of the packet header
  * @skb:      ptr to the skb we're sending
  * @hdr_len:  ptr to the size of the packet header
- * @cd_tunneling: ptr to context descriptor bits
+ * @cd_type_cmd_tso_mss: Quad Word 1
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                   u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
-                   u32 *cd_tunneling)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
 {
 {
-       u32 cd_cmd, cd_tso_len, cd_mss;
-       struct tcphdr *tcph;
-       struct iphdr *iph;
-       u32 l4len;
+       u64 cd_cmd, cd_tso_len, cd_mss;
+       union {
+               struct iphdr *v4;
+               struct ipv6hdr *v6;
+               unsigned char *hdr;
+       } ip;
+       union {
+               struct tcphdr *tcp;
+               struct udphdr *udp;
+               unsigned char *hdr;
+       } l4;
+       u32 paylen, l4_offset;
        int err;
        int err;
-#ifdef NETIF_F_TSO6
-       struct ipv6hdr *ipv6h;
-#endif
-#ifdef HAVE_ENCAP_TSO_OFFLOAD
-       bool enc = skb->encapsulation;
-#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
 
        if (!skb_is_gso(skb))
                return 0;
 
 
        if (!skb_is_gso(skb))
                return 0;
 
-       if (skb_header_cloned(skb)) {
-               err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-               if (err)
-                       return err;
+       err = skb_cow_head(skb, 0);
+       if (err < 0)
+               return err;
+
+       ip.hdr = skb_network_header(skb);
+       l4.hdr = skb_transport_header(skb);
+
+       /* initialize outer IP header fields */
+       if (ip.v4->version == 4) {
+               ip.v4->tot_len = 0;
+               ip.v4->check = 0;
+       } else {
+               ip.v6->payload_len = 0;
        }
        }
-#ifdef HAVE_ENCAP_TSO_OFFLOAD
-       iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb);
-#else
-       iph = ip_hdr(skb);
-#endif
 
 
-       if (iph->version == 4) {
 #ifdef HAVE_ENCAP_TSO_OFFLOAD
 #ifdef HAVE_ENCAP_TSO_OFFLOAD
-               iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb);
-               tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
-#else
-               iph = ip_hdr(skb);
-               tcph = tcp_hdr(skb);
-#endif /* HAVE_ENCAP_TSO_OFFLOAD */
-               iph->tot_len = 0;
-               iph->check = 0;
-               tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                0, IPPROTO_TCP, 0);
-#ifdef NETIF_F_TSO6
-       } else if (skb_is_gso_v6(skb)) {
-#ifdef HAVE_ENCAP_TSO_OFFLOAD
-               ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
-               tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
-#else
-               ipv6h = ipv6_hdr(skb);
-               tcph = tcp_hdr(skb);
-#endif /* HAVE_ENCAP_TSO_OFFLOAD */
-               ipv6h->payload_len = 0;
-               tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
-                                              0, IPPROTO_TCP, 0);
-#endif /* NETIF_F_TSO6 */
+       if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+                                        SKB_GSO_UDP_TUNNEL_CSUM)) {
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+                       /* determine offset of outer transport header */
+                       l4_offset = l4.hdr - skb->data;
+
+                       /* remove payload length from the outer checksum.
+                        * This code works a lot like csum_tcpudp_nofold,
+                        * and is endian correct, because the ntohs is really
+                        * just getting us a 0x1 or 0x100, which is then
+                        * converted by the compiler to a power of two shift
+                        * when used as a multiplier, which avoids the use of
+                        * ntohs on a variable, just to convert it back again.
+                        * This is done to allow the word by word computation
+                        * of the checksum to be done using the 32 bit scratch
+                        * area in paylen when computing the checksum.  Clear
+                        * as mud?
+                        */
+                       paylen = (__force u16)l4.udp->check;
+                       paylen += ntohs((__force __be16)1) * (u16)~(skb->len - l4_offset);
+                       l4.udp->check = ~csum_fold((__force __wsum)paylen);
+               }
+
+               /* reset pointers to inner headers */
+               ip.hdr = skb_inner_network_header(skb);
+               l4.hdr = skb_inner_transport_header(skb);
+
+               /* initialize inner IP header fields */
+               if (ip.v4->version == 4) {
+                       ip.v4->tot_len = 0;
+                       ip.v4->check = 0;
+               } else {
+                       ip.v6->payload_len = 0;
+               }
        }
 
        }
 
-#ifdef HAVE_ENCAP_TSO_OFFLOAD
-       l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
-       *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data)
-                      : skb_transport_offset(skb);
-       *hdr_len += l4len;
-#else
-       l4len = tcp_hdrlen(skb);
-       *hdr_len = skb_transport_offset(skb) + l4len;
 #endif /* HAVE_ENCAP_TSO_OFFLOAD */
 #endif /* HAVE_ENCAP_TSO_OFFLOAD */
+       /* determine offset of inner transport header */
+       l4_offset = l4.hdr - skb->data;
+
+       /* remove payload length from inner checksum, see
+        * explanatory comment above
+        */
+       paylen = (__force u16)l4.tcp->check;
+       paylen += ntohs((__force __be16)1) * (u16)~(skb->len - l4_offset);
+       l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+
+       /* compute length of segmentation header */
+       *hdr_len = (l4.tcp->doff * 4) + l4_offset;
 
        /* find the field values */
        cd_cmd = I40E_TX_CTX_DESC_TSO;
        cd_tso_len = skb->len - *hdr_len;
        cd_mss = skb_shinfo(skb)->gso_size;
 
        /* find the field values */
        cd_cmd = I40E_TX_CTX_DESC_TSO;
        cd_tso_len = skb->len - *hdr_len;
        cd_mss = skb_shinfo(skb)->gso_size;
-       *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
-                               ((u64)cd_tso_len <<
-                                I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-                               ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+       *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+                               (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+                               (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
        return 1;
 }
 
        return 1;
 }
 
@@ -2406,6 +2509,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
  * @tx_flags: the collected send information
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
  * @tx_flags: the collected send information
+ * @cd_type_cmd_tso_mss: Quad Word 1
  *
  * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
  **/
  *
  * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
  **/
@@ -2432,7 +2536,8 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (!(pf->flags & I40E_FLAG_PTP))
                return 0;
 
        if (!(pf->flags & I40E_FLAG_PTP))
                return 0;
 
-       if (pf->ptp_tx && !pf->ptp_tx_skb) {
+       if (pf->ptp_tx &&
+           !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
 #ifdef SKB_SHARED_TX_IS_UNION
                skb_tx(skb)->in_progress = 1;
 #else
 #ifdef SKB_SHARED_TX_IS_UNION
                skb_tx(skb)->in_progress = 1;
 #else
@@ -2456,115 +2561,159 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
  * @cd_tunneling: ptr to context desc bits
  **/
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
-                               u32 *td_cmd, u32 *td_offset,
-                               struct i40e_ring *tx_ring,
-                               u32 *cd_tunneling)
+static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+                              u32 *td_cmd, u32 *td_offset,
+                              struct i40e_ring *tx_ring,
+                              u32 *cd_tunneling)
 {
 {
-       struct ipv6hdr *this_ipv6_hdr;
-       unsigned int this_tcp_hdrlen;
-       struct iphdr *this_ip_hdr;
-       u32 network_hdr_len;
-       u8 l4_hdr = 0;
-#ifdef HAVE_ENCAP_CSUM_OFFLOAD
-       u32 l4_tunnel = 0;
+       union {
+               struct iphdr *v4;
+               struct ipv6hdr *v6;
+               unsigned char *hdr;
+       } ip;
+       union {
+               struct tcphdr *tcp;
+               struct udphdr *udp;
+               unsigned char *hdr;
+       } l4;
+       unsigned char *exthdr;
+       u32 offset, cmd = 0;
+       __be16 frag_off;
+       u8 l4_proto = 0;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       ip.hdr = skb_network_header(skb);
+       l4.hdr = skb_transport_header(skb);
 
 
+       /* compute outer L2 header size */
+       offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
        if (skb->encapsulation) {
        if (skb->encapsulation) {
-               switch (ip_hdr(skb)->protocol) {
+               u32 tunnel = 0;
+               /* There seems to be an issue in some versions of kernel stack
+                * (e.g. v3.14) where the transport_header is getting set to
+                * the same value as the inner_transport_header which messes up
+                * the calculations used by the skbuff macros.  Here we find our
+                * own network_header_len and transport_header values.
+                */
+               int net_head_len = ip_hdr(skb)->ihl * 4;
+               int trans_head = skb->network_header + net_head_len;
+
+               if (skb->transport_header == skb->inner_transport_header)
+                       l4.hdr = skb->data + trans_head;
+               /* define outer network header type */
+               if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                       tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+                                 I40E_TX_CTX_EXT_IP_IPV4 :
+                                 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+
+                       l4_proto = ip.v4->protocol;
+               } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+                       tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+
+                       exthdr = ip.hdr + sizeof(*ip.v6);
+                       l4_proto = ip.v6->nexthdr;
+                       if (l4.hdr != exthdr)
+                               ipv6_skip_exthdr(skb, exthdr - skb->data,
+                                                &l4_proto, &frag_off);
+               }
+
+               /* compute outer L3 header size */
+               tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+                         I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+               /* switch IP header pointer from outer to inner header */
+               ip.hdr = skb_inner_network_header(skb);
+
+               /* define outer transport */
+               switch (l4_proto) {
                case IPPROTO_UDP:
                case IPPROTO_UDP:
-                       l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
-                       *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+                       tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
+                       *tx_flags |= I40E_TX_FLAGS_TUNNEL;
+                       break;
+#ifdef HAVE_GRE_ENCAP_OFFLOAD
+               case IPPROTO_GRE:
+                       tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
+                       *tx_flags |= I40E_TX_FLAGS_TUNNEL;
                        break;
                        break;
+#endif
                default:
                default:
-                       return;
+                       if (*tx_flags & I40E_TX_FLAGS_TSO)
+                               return -1;
+
+                       skb_checksum_help(skb);
+                       return 0;
                }
                }
-               network_hdr_len = skb_inner_network_header_len(skb);
-               this_ip_hdr = inner_ip_hdr(skb);
-               this_ipv6_hdr = inner_ipv6_hdr(skb);
-               this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
 
-               if (*tx_flags & I40E_TX_FLAGS_IPV4) {
-                       if (*tx_flags & I40E_TX_FLAGS_TSO) {
-                               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
-                               ip_hdr(skb)->check = 0;
 #ifdef I40E_ADD_PROBES
 #ifdef I40E_ADD_PROBES
+               if (*tx_flags & I40E_TX_FLAGS_IPV4)
+                       if (*tx_flags & I40E_TX_FLAGS_TSO)
                                tx_ring->vsi->back->tx_ip4_cso++;
 #endif
                                tx_ring->vsi->back->tx_ip4_cso++;
 #endif
-                       } else {
-                               *cd_tunneling |=
-                                        I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-                       }
-               } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-                       if (*tx_flags & I40E_TX_FLAGS_TSO)
-                               ip_hdr(skb)->check = 0;
-               }
-
-               /* Now set the ctx descriptor fields */
-               *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
-                                  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
-                                  l4_tunnel                             |
-                                  ((skb_inner_network_offset(skb) -
-                                       skb_transport_offset(skb)) >> 1) <<
-                                  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-               if (this_ip_hdr->version == 6) {
-                       *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+               /* compute tunnel header size */
+               tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+                         I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+               /* indicate if we need to offload outer UDP header */
+               if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+                   (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+                       tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+
+               /* record tunnel offload values */
+               *cd_tunneling |= tunnel;
+
+               /* switch L4 header pointer from outer to inner */
+               l4.hdr = skb_inner_transport_header(skb);
+               l4_proto = 0;
+
+               /* reset type as we transition from outer to inner headers */
+               *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
+               if (ip.v4->version == 4)
+                       *tx_flags |= I40E_TX_FLAGS_IPV4;
+               if (ip.v6->version == 6)
                        *tx_flags |= I40E_TX_FLAGS_IPV6;
                        *tx_flags |= I40E_TX_FLAGS_IPV6;
-               }
-
-       } else {
-               network_hdr_len = skb_network_header_len(skb);
-               this_ip_hdr = ip_hdr(skb);
-               this_ipv6_hdr = ipv6_hdr(skb);
-               this_tcp_hdrlen = tcp_hdrlen(skb);
        }
        }
-#else
-       network_hdr_len = skb_network_header_len(skb);
-       this_ip_hdr = ip_hdr(skb);
-       this_ipv6_hdr = ipv6_hdr(skb);
-       this_tcp_hdrlen = tcp_hdrlen(skb);
 #endif /* HAVE_ENCAP_CSUM_OFFLOAD */
 
        /* Enable IP checksum offloads */
        if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 #endif /* HAVE_ENCAP_CSUM_OFFLOAD */
 
        /* Enable IP checksum offloads */
        if (*tx_flags & I40E_TX_FLAGS_IPV4) {
-               l4_hdr = this_ip_hdr->protocol;
+               l4_proto = ip.v4->protocol;
+#ifdef I40E_ADD_PROBES
+               tx_ring->vsi->back->tx_ip4_cso++;
+#endif
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
-               if (*tx_flags & I40E_TX_FLAGS_TSO) {
-                       *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
-                       this_ip_hdr->check = 0;
-#ifdef I40E_ADD_PROBES
-                       tx_ring->vsi->back->tx_ip4_cso++;
-#endif
-               } else {
-                       *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
-               }
-               /* Now set the td_offset for IP header length */
-               *td_offset = (network_hdr_len >> 2) <<
-                             I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+               cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+                      I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
+                      I40E_TX_DESC_CMD_IIPT_IPV4;
 #ifdef NETIF_F_IPV6_CSUM
        } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 #ifdef NETIF_F_IPV6_CSUM
        } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
-               l4_hdr = this_ipv6_hdr->nexthdr;
-               *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
-               /* Now set the td_offset for IP header length */
-               *td_offset = (network_hdr_len >> 2) <<
-                             I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+               cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+
+               exthdr = ip.hdr + sizeof(*ip.v6);
+               l4_proto = ip.v6->nexthdr;
+               if (l4.hdr != exthdr)
+                       ipv6_skip_exthdr(skb, exthdr - skb->data,
+                                        &l4_proto, &frag_off);
 #endif
        }
 #endif
        }
-       /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
-       *td_offset |= (skb_network_offset(skb) >> 1) <<
-                      I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+       /* compute inner L3 header size */
+       offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
 
        /* Enable L4 checksum offloads */
 
        /* Enable L4 checksum offloads */
-       switch (l4_hdr) {
+       switch (l4_proto) {
        case IPPROTO_TCP:
                /* enable checksum offloads */
        case IPPROTO_TCP:
                /* enable checksum offloads */
-               *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
-               *td_offset |= (this_tcp_hdrlen >> 2) <<
-                      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+               cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+               offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 #ifdef I40E_ADD_PROBES
                tx_ring->vsi->back->tx_tcp_cso++;
 #endif
 #ifdef I40E_ADD_PROBES
                tx_ring->vsi->back->tx_tcp_cso++;
 #endif
@@ -2572,9 +2721,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
        case IPPROTO_SCTP:
                /* enable SCTP checksum offload */
 #ifdef HAVE_SCTP
        case IPPROTO_SCTP:
                /* enable SCTP checksum offload */
 #ifdef HAVE_SCTP
-               *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
-               *td_offset |= (sizeof(struct sctphdr) >> 2) <<
-                              I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+               cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+               offset |= (sizeof(struct sctphdr) >> 2) <<
+                         I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 #ifdef I40E_ADD_PROBES
                        tx_ring->vsi->back->tx_sctp_cso++;
 #endif
 #ifdef I40E_ADD_PROBES
                        tx_ring->vsi->back->tx_sctp_cso++;
 #endif
@@ -2582,16 +2731,24 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                break;
        case IPPROTO_UDP:
                /* enable UDP checksum offload */
                break;
        case IPPROTO_UDP:
                /* enable UDP checksum offload */
-               *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
-               *td_offset |= (sizeof(struct udphdr) >> 2) <<
-                              I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+               cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+               offset |= (sizeof(struct udphdr) >> 2) <<
+                         I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 #ifdef I40E_ADD_PROBES
                        tx_ring->vsi->back->tx_udp_cso++;
 #endif
                break;
        default:
 #ifdef I40E_ADD_PROBES
                        tx_ring->vsi->back->tx_udp_cso++;
 #endif
                break;
        default:
-               break;
+               if (*tx_flags & I40E_TX_FLAGS_TSO)
+                       return -1;
+               skb_checksum_help(skb);
+               return 0;
        }
        }
+
+       *td_cmd |= cmd;
+       *td_offset |= offset;
+
+       return 1;
 }
 
 /**
 }
 
 /**
@@ -2625,62 +2782,6 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
-/**
- * i40e_chk_linearize - Check if there are more than 8 fragments per packet
- * @skb:      send buffer
- * @tx_flags: collected send information
- *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
- **/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
-{
-       struct skb_frag_struct *frag;
-       bool linearize = false;
-       unsigned int size = 0;
-       u16 num_frags;
-       u16 gso_segs;
-
-       num_frags = skb_shinfo(skb)->nr_frags;
-       gso_segs = skb_shinfo(skb)->gso_segs;
-
-       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-               u16 j = 0;
-
-               if (num_frags < (I40E_MAX_BUFFER_TXD))
-                       goto linearize_chk_done;
-               /* try the simple math, if we have too many frags per segment */
-               if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
-                                                       I40E_MAX_BUFFER_TXD) {
-                       linearize = true;
-                       goto linearize_chk_done;
-               }
-               frag = &skb_shinfo(skb)->frags[0];
-               /* we might still have more fragments per segment */
-               do {
-                       size += skb_frag_size(frag);
-                       frag++; j++;
-                       if ((size >= skb_shinfo(skb)->gso_size) &&
-                           (j < I40E_MAX_BUFFER_TXD)) {
-                               size = (size % skb_shinfo(skb)->gso_size);
-                               j = (size) ? 1 : 0;
-                       }
-                       if (j == I40E_MAX_BUFFER_TXD) {
-                               linearize = true;
-                               break;
-                       }
-                       num_frags--;
-               } while (num_frags);
-       } else {
-               if (num_frags >= I40E_MAX_BUFFER_TXD)
-                       linearize = true;
-       }
-
-linearize_chk_done:
-       return linearize;
-}
-
 /**
  * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
  * @tx_ring: the ring to be checked
 /**
  * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
  * @tx_ring: the ring to be checked
@@ -2688,7 +2789,7 @@ linearize_chk_done:
  *
  * Returns -EBUSY if a stop is needed, else 0
  **/
  *
  * Returns -EBUSY if a stop is needed, else 0
  **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 {
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Memory barrier before checking head and tail */
 {
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Memory barrier before checking head and tail */
@@ -2705,21 +2806,70 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 }
 
 /**
 }
 
 /**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
+ * @skb:      send buffer
+ *
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
  *
  *
- * Returns 0 if stop is not needed
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
  **/
-#ifdef I40E_FCOE
-inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#else
-static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#endif
+bool __i40e_chk_linearize(struct sk_buff *skb)
 {
 {
-       if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-               return 0;
-       return __i40e_maybe_stop_tx(tx_ring, size);
+       const struct skb_frag_struct *frag, *stale;
+       int nr_frags, sum;
+
+       /* no need to check if number of frags is less than 7 */
+       nr_frags = skb_shinfo(skb)->nr_frags;
+       if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
+               return false;
+
+       /* We need to walk through the list and validate that each group
+        * of 6 fragments totals at least gso_size.  However we don't need
+        * to perform such validation on the last 6 since the last 6 cannot
+        * inherit any data from a descriptor after them.
+        */
+       nr_frags -= I40E_MAX_BUFFER_TXD - 2;
+       frag = &skb_shinfo(skb)->frags[0];
+
+       /* Initialize size to the negative value of gso_size minus 1.  We
+        * use this as the worst case scenerio in which the frag ahead
+        * of us only provides one byte which is why we are limited to 6
+        * descriptors for a single transmit as the header and previous
+        * fragment are already consuming 2 descriptors.
+        */
+       sum = 1 - skb_shinfo(skb)->gso_size;
+
+       /* Add size of frags 0 through 4 to create our initial sum */
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+
+       /* Walk through fragments adding latest fragment, testing it, and
+        * then removing stale fragments from the sum.
+        */
+       stale = &skb_shinfo(skb)->frags[0];
+       for (;;) {
+               sum += skb_frag_size(frag++);
+
+               /* if sum is negative we failed to make sufficient progress */
+               if (sum < 0)
+                       return true;
+
+               /* use pre-decrement to avoid processing last fragment */
+               if (!--nr_frags)
+                       break;
+
+               sum -= skb_frag_size(stale++);
+       }
+
+       return false;
 }
 
 /**
 }
 
 /**
@@ -2742,15 +2892,18 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                               const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #endif
 {
                               const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #endif
 {
+       unsigned int data_len = skb->data_len;
+       unsigned int size = skb_headlen(skb);
        struct skb_frag_struct *frag;
        struct i40e_tx_buffer *tx_bi;
        struct i40e_tx_desc *tx_desc;
        u16 i = tx_ring->next_to_use;
        struct skb_frag_struct *frag;
        struct i40e_tx_buffer *tx_bi;
        struct i40e_tx_desc *tx_desc;
        u16 i = tx_ring->next_to_use;
-       unsigned int data_len;
-       unsigned int size;
        u32 td_tag = 0;
        dma_addr_t dma;
        u16 gso_segs;
        u32 td_tag = 0;
        dma_addr_t dma;
        u16 gso_segs;
+       u16 desc_count = 0;
+       bool tail_bump = true;
+       bool do_rs = false;
 
        if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
                td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
 
        if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
                td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2768,9 +2921,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                tx_ring->vsi->back->tcp_segs += gso_segs;
 
 #endif
                tx_ring->vsi->back->tcp_segs += gso_segs;
 
 #endif
-       data_len = skb->data_len;
-       size = skb_headlen(skb);
-
        /* multiply data chunks by size of headers */
        first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
        first->gso_segs = gso_segs;
        /* multiply data chunks by size of headers */
        first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
        first->gso_segs = gso_segs;
@@ -2783,6 +2933,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
        tx_bi = first;
 
        for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
        tx_bi = first;
 
        for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+               unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
                if (dma_mapping_error(tx_ring->dev, dma))
                        goto dma_error;
 
                if (dma_mapping_error(tx_ring->dev, dma))
                        goto dma_error;
 
@@ -2790,25 +2942,28 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                dma_unmap_len_set(tx_bi, len, size);
                dma_unmap_addr_set(tx_bi, dma, dma);
 
                dma_unmap_len_set(tx_bi, len, size);
                dma_unmap_addr_set(tx_bi, dma, dma);
 
+               /* align size to end of page */
+               max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
                tx_desc->buffer_addr = cpu_to_le64(dma);
 
                while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
                        tx_desc->cmd_type_offset_bsz =
                                build_ctob(td_cmd, td_offset,
                tx_desc->buffer_addr = cpu_to_le64(dma);
 
                while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
                        tx_desc->cmd_type_offset_bsz =
                                build_ctob(td_cmd, td_offset,
-                                          I40E_MAX_DATA_PER_TXD, td_tag);
+                                          max_data, td_tag);
 
                        tx_desc++;
                        i++;
 
                        tx_desc++;
                        i++;
+                       desc_count++;
+
                        if (i == tx_ring->count) {
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
                                i = 0;
                        }
                        if (i == tx_ring->count) {
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
                                i = 0;
                        }
-                       tx_bi = &tx_ring->tx_bi[i];
-                       memset(tx_bi, 0, sizeof(struct i40e_tx_buffer));
 
 
-                       dma += I40E_MAX_DATA_PER_TXD;
-                       size -= I40E_MAX_DATA_PER_TXD;
+                       dma += max_data;
+                       size -= max_data;
 
 
+                       max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
                        tx_desc->buffer_addr = cpu_to_le64(dma);
                }
 
                        tx_desc->buffer_addr = cpu_to_le64(dma);
                }
 
@@ -2820,6 +2975,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
                tx_desc++;
                i++;
 
                tx_desc++;
                i++;
+               desc_count++;
+
                if (i == tx_ring->count) {
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                        i = 0;
                if (i == tx_ring->count) {
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                        i = 0;
@@ -2832,48 +2989,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                       DMA_TO_DEVICE);
 
                tx_bi = &tx_ring->tx_bi[i];
                                       DMA_TO_DEVICE);
 
                tx_bi = &tx_ring->tx_bi[i];
-               memset(tx_bi, 0, sizeof(struct i40e_tx_buffer));
        }
 
        }
 
-       /* Place RS bit on last descriptor of any packet that spans across the
-        * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
-        */
-#ifdef HAVE_SKB_XMIT_MORE
-       if (skb->xmit_more  &&
-           ((tx_ring->packet_stride & WB_STRIDE) != WB_STRIDE) &&
-           (first <= &tx_ring->tx_bi[i]) &&
-           (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
-               tx_ring->packet_stride++;
-               tx_desc->cmd_type_offset_bsz =
-                       build_ctob(td_cmd, td_offset, size, td_tag) |
-                       cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
-                                        I40E_TXD_QW1_CMD_SHIFT);
-       } else {
-               tx_ring->packet_stride = 0;
-               tx_desc->cmd_type_offset_bsz =
-                       build_ctob(td_cmd, td_offset, size, td_tag) |
-                       cpu_to_le64((u64)I40E_TXD_CMD <<
-                                        I40E_TXD_QW1_CMD_SHIFT);
-       }
-#else
-       tx_desc->cmd_type_offset_bsz =
-               build_ctob(td_cmd, td_offset, size, td_tag) |
-               cpu_to_le64((u64)I40E_TXD_CMD <<
-                                I40E_TXD_QW1_CMD_SHIFT);
-
-#endif
-
-       netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-                                                tx_ring->queue_index),
-                            first->bytecount);
-
-       /* Force memory writes to complete before letting h/w
-        * know there are new descriptors to fetch.  (Only
-        * applicable for weak-ordered memory model archs,
-        * such as IA-64).
-        */
-       wmb();
-
        /* set next_to_watch value indicating a packet is present */
        first->next_to_watch = tx_desc;
 
        /* set next_to_watch value indicating a packet is present */
        first->next_to_watch = tx_desc;
 
@@ -2883,19 +3000,90 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
        tx_ring->next_to_use = i;
 
 
        tx_ring->next_to_use = i;
 
+       netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+                                                tx_ring->queue_index),
+                                                first->bytecount);
        i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
        i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
-       /* notify HW of packet */
+
+       /* Algorithm to optimize tail and RS bit setting:
+        * if xmit_more is supported
+        *      if xmit_more is true
+        *              do not update tail and do not mark RS bit.
+        *      if xmit_more is false and last xmit_more was false
+        *              if every packet spanned less than 4 desc
+        *                      then set RS bit on 4th packet and update tail
+        *                      on every packet
+        *              else
+        *                      update tail and set RS bit on every packet.
+        *      if xmit_more is false and last_xmit_more was true
+        *              update tail and set RS bit.
+        * else (kernel < 3.18)
+        *      if every packet spanned less than 4 desc
+        *              then set RS bit on 4th packet and update tail
+        *              on every packet
+        *      else
+        *              set RS bit on EOP for every packet and update tail
+        *
+        * Optimization: wmb to be issued only in case of tail update.
+        * Also optimize the Descriptor WB path for RS bit with the same
+        * algorithm.
+        *
+        * Note: If there are less than 4 packets
+        * pending and interrupts were disabled the service task will
+        * trigger a force WB.
+        */
 #ifdef HAVE_SKB_XMIT_MORE
 #ifdef HAVE_SKB_XMIT_MORE
-       if (!skb->xmit_more ||
-           netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-                                                  tx_ring->queue_index)))
-               writel(i, tx_ring->tail);
+       if (skb->xmit_more  &&
+           !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                                                   tx_ring->queue_index))) {
+               tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+               tail_bump = false;
+       } else if (!skb->xmit_more &&
+                  !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                                                      tx_ring->queue_index)) &&
+                  (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
+                  (tx_ring->packet_stride < WB_STRIDE) &&
+                  (desc_count < WB_STRIDE)) {
+               tx_ring->packet_stride++;
+       } else {
+               tx_ring->packet_stride = 0;
+               tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+               do_rs = true;
+       }
+#else
+       if ((tx_ring->packet_stride < WB_STRIDE) &&
+           (desc_count < WB_STRIDE) &&
+           !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                                                   tx_ring->queue_index)))
+               tx_ring->packet_stride++;
        else
        else
+               do_rs = true;
+#endif /* HAVE_SKB_XMIT_MORE */
+       if (do_rs)
+               tx_ring->packet_stride = 0;
+
+       tx_desc->cmd_type_offset_bsz =
+                       build_ctob(td_cmd, td_offset, size, td_tag) |
+                       cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
+                                                 I40E_TX_DESC_CMD_EOP) <<
+                                                 I40E_TXD_QW1_CMD_SHIFT);
+
+       /* notify HW of packet */
+#ifdef HAVE_SKB_XMIT_MORE
+       if (!tail_bump)
                prefetchw(tx_desc + 1);
                prefetchw(tx_desc + 1);
-#else
-       writel(i, tx_ring->tail);
 #endif /* HAVE_XMIT_MORE */
 
 #endif /* HAVE_XMIT_MORE */
 
+       if (tail_bump) {
+               /* Force memory writes to complete before letting h/w
+                * know there are new descriptors to fetch.  (Only
+                * applicable for weak-ordered memory model archs,
+                * such as IA-64).
+                */
+               wmb();
+               writel(i, tx_ring->tail);
+       }
+
        return;
 
 dma_error:
        return;
 
 dma_error:
@@ -2915,43 +3103,6 @@ dma_error:
        tx_ring->next_to_use = i;
 }
 
        tx_ring->next_to_use = i;
 }
 
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
- * @skb:     send buffer
- * @tx_ring: ring to send buffer on
- *
- * Returns number of data descriptors needed for this skb. Returns 0 to indicate
- * there is not enough descriptors available in this ring since we need at least
- * one descriptor.
- **/
-#ifdef I40E_FCOE
-inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                                     struct i40e_ring *tx_ring)
-#else
-static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                                            struct i40e_ring *tx_ring)
-#endif
-{
-       unsigned int f;
-       int count = 0;
-
-       /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
-        *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
-        *       + 4 desc gap to avoid the cache line where head is,
-        *       + 1 desc for context descriptor,
-        * otherwise try next time
-        */
-       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
-       count += TXD_USE_COUNT(skb_headlen(skb));
-       if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
-               tx_ring->tx_stats.tx_busy++;
-               return 0;
-       }
-       return count;
-}
-
 #if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE)
 /**
  * i40e_lan_select_queue - Select the right Tx queue for the skb for LAN VSI
 #if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE)
 /**
  * i40e_lan_select_queue - Select the right Tx queue for the skb for LAN VSI
@@ -2984,20 +3135,39 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        __be16 protocol;
        u32 td_cmd = 0;
        u8 hdr_len = 0;
        __be16 protocol;
        u32 td_cmd = 0;
        u8 hdr_len = 0;
+       int tso, count;
 #ifdef HAVE_PTP_1588_CLOCK
        int tsyn;
 #endif /* HAVE_PTP_1588_CLOCK */
 #ifdef HAVE_PTP_1588_CLOCK
        int tsyn;
 #endif /* HAVE_PTP_1588_CLOCK */
-       int tso;
 
 
-       if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+       /* prefetch the data, we'll need it later */
+       prefetch(skb->data);
+
+       count = i40e_xmit_descriptor_count(skb);
+       if (i40e_chk_linearize(skb, count)) {
+               if (__skb_linearize(skb))
+                       goto out_drop;
+               count = i40e_txd_use_count(skb->len);
+               tx_ring->tx_stats.tx_linearize++;
+       }
+
+       /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+        *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+        *       + 4 desc gap to avoid the cache line where head is,
+        *       + 1 desc for context descriptor,
+        * otherwise try next time
+        */
+       if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+               tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
                return NETDEV_TX_BUSY;
+       }
 
        /* prepare the xmit flags */
        if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;
 
        /* obtain protocol of skb */
 
        /* prepare the xmit flags */
        if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;
 
        /* obtain protocol of skb */
-       protocol = skb->protocol;
+       protocol = vlan_get_protocol(skb);
 
        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];
 
        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];
@@ -3008,20 +3178,18 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
 
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
 
-       tso = i40e_tso(tx_ring, skb, &hdr_len,
-                      &cd_type_cmd_tso_mss, &cd_tunneling);
+       tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
 
        if (tso < 0)
                goto out_drop;
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
 
 
        if (tso < 0)
                goto out_drop;
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
 
-       if (i40e_chk_linearize(skb, tx_flags)) {
-               if (skb_linearize(skb))
-                       goto out_drop;
-               tx_ring->tx_stats.tx_linearize++;
-       }
-       skb_tx_timestamp(skb);
+       /* Always offload the checksum, since it's in the data descriptor */
+       tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+                                 tx_ring, &cd_tunneling);
+       if (tso < 0)
+               goto out_drop;
 
 #ifdef HAVE_PTP_1588_CLOCK
        tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
 
 #ifdef HAVE_PTP_1588_CLOCK
        tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
@@ -3030,17 +3198,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                tx_flags |= I40E_TX_FLAGS_TSYN;
 
 #endif /* HAVE_PTP_1588_CLOCK */
                tx_flags |= I40E_TX_FLAGS_TSYN;
 
 #endif /* HAVE_PTP_1588_CLOCK */
+       skb_tx_timestamp(skb);
+
        /* always enable CRC insertion offload */
        td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
        /* always enable CRC insertion offload */
        td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
-       /* Always offload the checksum, since it's in the data descriptor */
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               tx_flags |= I40E_TX_FLAGS_CSUM;
-
-               i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
-                                   tx_ring, &cd_tunneling);
-       }
-
        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);
 
        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);
 
@@ -3048,7 +3210,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         *
         * NOTE: this must always be directly before the data descriptor.
         */
         *
         * NOTE: this must always be directly before the data descriptor.
         */
-       i40e_atr(tx_ring, skb, tx_flags, protocol);
+       i40e_atr(tx_ring, skb, tx_flags);
 
        i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
                    td_cmd, td_offset);
 
        i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
                    td_cmd, td_offset);
@@ -3079,12 +3241,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        /* hardware can't handle really short frames, hardware padding works
         * beyond this point
         */
        /* hardware can't handle really short frames, hardware padding works
         * beyond this point
         */
-       if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
-               if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
-                       return NETDEV_TX_OK;
-               skb->len = I40E_MIN_TX_LEN;
-               skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
-       }
+       if (skb_put_padto(skb, I40E_MIN_TX_LEN))
+               return NETDEV_TX_OK;
 
        return i40e_xmit_frame_ring(skb, tx_ring);
 }
 
        return i40e_xmit_frame_ring(skb, tx_ring);
 }
similarity index 62%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_txrx.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_txrx.h
index dc4ed1da74debb1e72624b378be56aa7bc8a64b8..2f2d378fd4a0081a8498251f970293d19caf6e29 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -90,10 +87,20 @@ enum i40e_dyn_idx_t {
        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
        BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
        BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
-#define i40e_pf_get_default_rss_hena(pf) I40E_DEFAULT_RSS_HENA
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+       (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+         I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
 
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512   512    /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256   256
 #define I40E_RXBUFFER_2048  2048
 #define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
 #define I40E_RXBUFFER_4096  4096
 #define I40E_RXBUFFER_2048  2048
 #define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
 #define I40E_RXBUFFER_4096  4096
@@ -104,9 +111,28 @@ enum i40e_dyn_idx_t {
  * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
  * this adds up to 512 bytes of extra data meaning the smallest allocation
  * we could have is 1K.
  * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
  * this adds up to 512 bytes of extra data meaning the smallest allocation
  * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
  */
  */
-#define I40E_RX_HDR_SIZE  I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+                                    const u64 stat_err_bits)
+{
+       return !!(rx_desc->wb.qword1.status_error_len &
+                 cpu_to_le64(stat_err_bits));
+}
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define I40E_RX_BUFFER_WRITE   16      /* Must be power of 2 */
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define I40E_RX_BUFFER_WRITE   16      /* Must be power of 2 */
@@ -132,18 +158,44 @@ enum i40e_dyn_idx_t {
                prefetch((n));                          \
        } while (0)
 
                prefetch((n));                          \
        } while (0)
 
-#define i40e_rx_desc i40e_32byte_rx_desc
-
 #define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
 #define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
-#define I40E_MAX_DATA_PER_TXD  8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE         4096
+#define I40E_MAX_DATA_PER_TXD          (16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+       (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUNDUP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED.  It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive.  It is used to
+ * approximate the number of descriptors used per linear buffer.  Note
+ * that this will overestimate in some cases as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
+ * the error should not impact things much as large buffers usually mean
+ * we will use fewer descriptors then there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+       const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+       const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+       unsigned int adjust = ~(u32)0;
+
+       /* if we rounded up on the reciprocal pull down the adjustment */
+       if ((max * reciprocal) > adjust)
+               adjust = ~(u32)(reciprocal - 1);
+
+       return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
 
 /* Tx Descriptors needed, worst case */
 
 /* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
-#define I40E_MIN_DESC_PENDING   4
+#define I40E_MIN_DESC_PENDING  4
 
 
-#define I40E_TX_FLAGS_CSUM             BIT(0)
 #define I40E_TX_FLAGS_HW_VLAN          BIT(1)
 #define I40E_TX_FLAGS_SW_VLAN          BIT(2)
 #define I40E_TX_FLAGS_TSO              BIT(3)
 #define I40E_TX_FLAGS_HW_VLAN          BIT(1)
 #define I40E_TX_FLAGS_SW_VLAN          BIT(2)
 #define I40E_TX_FLAGS_TSO              BIT(3)
@@ -155,7 +207,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_TSYN             BIT(8)
 #endif /* HAVE_PTP_1588_CLOCK */
 #define I40E_TX_FLAGS_FD_SB            BIT(9)
 #define I40E_TX_FLAGS_TSYN             BIT(8)
 #endif /* HAVE_PTP_1588_CLOCK */
 #define I40E_TX_FLAGS_FD_SB            BIT(9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL     BIT(10)
+#define I40E_TX_FLAGS_TUNNEL           BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -177,10 +229,8 @@ struct i40e_tx_buffer {
 
 struct i40e_rx_buffer {
        struct sk_buff *skb;
 
 struct i40e_rx_buffer {
        struct sk_buff *skb;
-       void *hdr_buf;
        dma_addr_t dma;
        struct page *page;
        dma_addr_t dma;
        struct page *page;
-       dma_addr_t page_dma;
        unsigned int page_offset;
 };
 
        unsigned int page_offset;
 };
 
@@ -194,41 +244,33 @@ struct i40e_tx_queue_stats {
        u64 tx_busy;
        u64 tx_done_old;
        u64 tx_linearize;
        u64 tx_busy;
        u64 tx_done_old;
        u64 tx_linearize;
+       u64 tx_force_wb;
+       u64 tx_lost_interrupt;
 };
 
 struct i40e_rx_queue_stats {
        u64 non_eop_descs;
        u64 alloc_page_failed;
        u64 alloc_buff_failed;
 };
 
 struct i40e_rx_queue_stats {
        u64 non_eop_descs;
        u64 alloc_page_failed;
        u64 alloc_buff_failed;
+       u64 page_reuse_count;
+       u64 realloc_count;
 };
 
 enum i40e_ring_state_t {
        __I40E_TX_FDIR_INIT_DONE,
        __I40E_TX_XPS_INIT_DONE,
 };
 
 enum i40e_ring_state_t {
        __I40E_TX_FDIR_INIT_DONE,
        __I40E_TX_XPS_INIT_DONE,
-       __I40E_TX_DETECT_HANG,
-       __I40E_HANG_CHECK_ARMED,
-       __I40E_RX_PS_ENABLED,
-       __I40E_RX_16BYTE_DESC_ENABLED,
 };
 
 };
 
-#define ring_is_ps_enabled(ring) \
-       test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
-       set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
-       clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define check_for_tx_hang(ring) \
-       test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
-       set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
-       clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
-       test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
-       set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
-       clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT      0
+#define I40E_RX_DTYPE_HEADER_SPLIT  1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
+#define I40E_RX_SPLIT_L2      0x1
+#define I40E_RX_SPLIT_IP      0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP    0x8
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
@@ -247,17 +289,7 @@ struct i40e_ring {
 
        u16 count;                      /* Number of descriptors */
        u16 reg_idx;                    /* HW register index of the ring */
 
        u16 count;                      /* Number of descriptors */
        u16 reg_idx;                    /* HW register index of the ring */
-       u16 rx_hdr_len;
        u16 rx_buf_len;
        u16 rx_buf_len;
-       u8  dtype;
-#define I40E_RX_DTYPE_NO_SPLIT      0
-#define I40E_RX_DTYPE_HEADER_SPLIT  1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
-       u8  hsplit;
-#define I40E_RX_SPLIT_L2      0x1
-#define I40E_RX_SPLIT_IP      0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP    0x8
 
        /* used in interrupt processing */
        u16 next_to_use;
 
        /* used in interrupt processing */
        u16 next_to_use;
@@ -272,7 +304,11 @@ struct i40e_ring {
 #endif /* HAVE_PTP_1588_CLOCK */
        bool ring_active;               /* is ring online or not */
        bool arm_wb;            /* do something to arm write back */
 #endif /* HAVE_PTP_1588_CLOCK */
        bool ring_active;               /* is ring online or not */
        bool arm_wb;            /* do something to arm write back */
-       u16  packet_stride;
+       u8 packet_stride;
+
+       u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR       BIT(0)
+#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
 
        /* stats structs */
        struct i40e_queue_stats stats;
 
        /* stats structs */
        struct i40e_queue_stats stats;
@@ -291,6 +327,7 @@ struct i40e_ring {
        struct i40e_q_vector *q_vector; /* Backreference to associated vector */
 
        struct rcu_head rcu;            /* to avoid race on free */
        struct i40e_q_vector *q_vector; /* Backreference to associated vector */
 
        struct rcu_head rcu;            /* to avoid race on free */
+       u16 next_to_alloc;
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
@@ -314,50 +351,7 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
 #define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-#define napi_hash_del(n)
-#define napi_hash_add(n)
-static inline void skb_mark_napi_id(struct sk_buff *skb,
-                                   struct napi_struct *napi)
-{
-}
-
-static inline void i40e_qv_init_lock(struct i40e_q_vector *q_vector)
-{
-}
-
-static inline bool i40e_qv_lock_napi(struct i40e_q_vector *q_vector)
-{
-       return true;
-}
-
-static inline bool i40e_qv_unlock_napi(struct i40e_q_vector *q_vector)
-{
-       return false;
-}
-
-static inline bool i40e_qv_lock_poll(struct i40e_q_vector *q_vector)
-{
-       return false;
-}
-
-static inline bool i40e_qv_unlock_poll(struct i40e_q_vector *q_vector)
-{
-       return false;
-}
-
-static inline bool i40e_qv_busy_polling(struct i40e_q_vector *q_vector)
-{
-       return false;
-}
-
-static inline bool i40e_qv_disable(struct i40e_q_vector *q_vector)
-{
-       return true;
-}
-
-void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 #if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE)
 extern u16 i40e_lan_select_queue(struct net_device *netdev,
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 #if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE)
 extern u16 i40e_lan_select_queue(struct net_device *netdev,
@@ -371,15 +365,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring);
 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
 #ifdef I40E_FCOE
 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
 #ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *, struct sk_buff *, struct i40e_tx_buffer *,
-                u32, const u8, u32, u32);
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
-int i40e_xmit_descriptor_count(struct sk_buff *, struct i40e_ring *);
-int i40e_tx_prepare_vlan_flags(struct sk_buff *, struct i40e_ring *,
-                              u32 *flags);
+void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                struct i40e_tx_buffer *first, u32 tx_flags,
+                const u8 hdr_len, u32 td_cmd, u32 td_offset);
+int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                              struct i40e_ring *tx_ring, u32 *flags);
 #endif
 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
 #endif
 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40e_get_tx_pending(struct i40e_ring *ring);
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
+bool __i40e_chk_linearize(struct sk_buff *skb);
 
 /**
  * i40e_get_head - Retrieve head from head writeback
 
 /**
  * i40e_get_head - Retrieve head from head writeback
@@ -394,4 +389,77 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
 
        return le32_to_cpu(*(volatile __le32 *)head);
 }
 
        return le32_to_cpu(*(volatile __le32 *)head);
 }
+
+/**
+ * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
+ * @ptype: the packet type field from rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+       return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+              (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns number of data descriptors needed for this skb. Returns 0 to indicate
+ * there is not enough descriptors available in this ring since we need at least
+ * one descriptor.
+ **/
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
+{
+       const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+       int count = 0, size = skb_headlen(skb);
+
+       for (;;) {
+               count += i40e_txd_use_count(size);
+
+               if (!nr_frags--)
+                       break;
+
+               size = skb_frag_size(frag++);
+       }
+
+       return count;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+       if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+               return 0;
+       return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @count:    number of buffers used
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
+{
+       /* Both TSO and single send will work if count is less than 8 */
+       if (likely(count < I40E_MAX_BUFFER_TXD))
+               return false;
+
+       if (skb_is_gso(skb))
+               return __i40e_chk_linearize(skb);
+
+       /* we can support up to 8 data buffers for a single send */
+       return count != I40E_MAX_BUFFER_TXD;
+}
 #endif /* _I40E_TXRX_H_ */
 #endif /* _I40E_TXRX_H_ */
similarity index 92%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_type.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_type.h
index 3e04d4e9f342b62ce68027c61fff3b0ccecaad12..d904babfa66ec7955f20f7e69f87e19deb7a6ad4 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -55,7 +52,7 @@
 #define I40E_MAX_NVM_TIMEOUT           18000
 
 /* Check whether address is multicast. */
 #define I40E_MAX_NVM_TIMEOUT           18000
 
 /* Check whether address is multicast. */
-#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+#define is_multicast_ether_addr(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
 
 /* Check whether an address is broadcast. */
 #define I40E_IS_BROADCAST(address)     \
 
 /* Check whether an address is broadcast. */
 #define I40E_IS_BROADCAST(address)     \
@@ -129,6 +126,22 @@ enum i40e_debug_mask {
 #define I40E_PCI_LINK_SPEED_5000       0x2
 #define I40E_PCI_LINK_SPEED_8000       0x3
 
 #define I40E_PCI_LINK_SPEED_5000       0x2
 #define I40E_PCI_LINK_SPEED_8000       0x3
 
+#define I40E_MDIO_STCODE               0
+#define I40E_MDIO_OPCODE_ADDRESS       0
+#define I40E_MDIO_OPCODE_WRITE         I40E_MASK(1, \
+                                                 I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+                                                 I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_OPCODE_READ          I40E_MASK(3, \
+                                                 I40E_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define I40E_PHY_COM_REG_PAGE                  0x1E
+#define I40E_PHY_LED_LINK_MODE_MASK            0xF0
+#define I40E_PHY_LED_MANUAL_ON                 0x100
+#define I40E_PHY_LED_PROV_REG_1                        0xC430
+#define I40E_PHY_LED_MODE_MASK                 0xFFFF
+#define I40E_PHY_LED_MODE_ORIG                 0x80000000
+
 /* Memory types */
 enum i40e_memset_type {
        I40E_NONDMA_MEM = 0,
 /* Memory types */
 enum i40e_memset_type {
        I40E_NONDMA_MEM = 0,
@@ -143,6 +156,9 @@ enum i40e_memcpy_type {
        I40E_DMA_TO_NONDMA
 };
 
        I40E_DMA_TO_NONDMA
 };
 
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0005
+
 /* These are structs for managing the hardware information and the operations.
  * The structures of function pointers are filled out at init time when we
  * know for sure exactly which hardware we're working with.  This gives us the
 /* These are structs for managing the hardware information and the operations.
  * The structures of function pointers are filled out at init time when we
  * know for sure exactly which hardware we're working with.  This gives us the
@@ -156,6 +172,8 @@ enum i40e_mac_type {
        I40E_MAC_X710,
        I40E_MAC_XL710,
        I40E_MAC_VF,
        I40E_MAC_X710,
        I40E_MAC_XL710,
        I40E_MAC_VF,
+       I40E_MAC_X722,
+       I40E_MAC_X722_VF,
        I40E_MAC_GENERIC,
 };
 
        I40E_MAC_GENERIC,
 };
 
@@ -282,6 +300,15 @@ struct i40e_phy_info {
 #define I40E_HW_CAP_MDIO_PORT_MODE_MDIO                0
 #define I40E_HW_CAP_MDIO_PORT_MODE_I2C         1
 
 #define I40E_HW_CAP_MDIO_PORT_MODE_MDIO                0
 #define I40E_HW_CAP_MDIO_PORT_MODE_I2C         1
 
+enum i40e_acpi_programming_method {
+       I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+       I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define I40E_WOL_SUPPORT_MASK                  1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK      (1 << 1)
+#define I40E_PROXY_SUPPORT_MASK                        (1 << 2)
+
 /* Capabilities of a PF or a VF or the whole device */
 struct i40e_hw_capabilities {
        u32  switch_mode;
 /* Capabilities of a PF or a VF or the whole device */
 struct i40e_hw_capabilities {
        u32  switch_mode;
@@ -311,6 +338,11 @@ struct i40e_hw_capabilities {
 #define I40E_FLEX10_STATUS_DCC_ERROR   0x1
 #define I40E_FLEX10_STATUS_VC_MODE     0x2
 
 #define I40E_FLEX10_STATUS_DCC_ERROR   0x1
 #define I40E_FLEX10_STATUS_VC_MODE     0x2
 
+       bool sec_rev_disabled;
+       bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED  0x2
+
        bool mgmt_cem;
        bool ieee_1588;
        bool iwarp;
        bool mgmt_cem;
        bool ieee_1588;
        bool iwarp;
@@ -340,6 +372,9 @@ struct i40e_hw_capabilities {
        u32 enabled_tcmap;
        u32 maxtc;
        u64 wr_csr_prot;
        u32 enabled_tcmap;
        u32 maxtc;
        u64 wr_csr_prot;
+       bool apm_wol_support;
+       enum i40e_acpi_programming_method acpi_prog_method;
+       bool proxy_support;
 };
 
 struct i40e_mac_info {
 };
 
 struct i40e_mac_info {
@@ -585,6 +620,8 @@ struct i40e_hw {
        enum i40e_nvmupd_state nvmupd_state;
        struct i40e_aq_desc nvm_wb_desc;
        struct i40e_virt_mem nvm_buff;
        enum i40e_nvmupd_state nvmupd_state;
        struct i40e_aq_desc nvm_wb_desc;
        struct i40e_virt_mem nvm_buff;
+       bool nvm_release_on_done;
+       u16 nvm_wait_opcode;
 
        /* HMC info */
        struct i40e_hmc_info hmc; /* HMC info struct */
 
        /* HMC info */
        struct i40e_hmc_info hmc; /* HMC info struct */
@@ -597,6 +634,13 @@ struct i40e_hw {
        struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
        struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
 
        struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
        struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
 
+       /* WoL and proxy support */
+       u16 num_wol_proxy_filters;
+       u16 wol_proxy_vsi_seid;
+
+#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+       u64 flags;
+
        /* debug mask */
        u32 debug_mask;
        char err_str[16];
        /* debug mask */
        u32 debug_mask;
        char err_str[16];
@@ -604,7 +648,8 @@ struct i40e_hw {
 
 static INLINE bool i40e_is_vf(struct i40e_hw *hw)
 {
 
 static INLINE bool i40e_is_vf(struct i40e_hw *hw)
 {
-       return hw->mac.type == I40E_MAC_VF;
+       return (hw->mac.type == I40E_MAC_VF ||
+               hw->mac.type == I40E_MAC_X722_VF);
 }
 
 struct i40e_driver_version {
 }
 
 struct i40e_driver_version {
@@ -708,7 +753,7 @@ enum i40e_rx_desc_status_bits {
        I40E_RX_DESC_STATUS_CRCP_SHIFT          = 4,
        I40E_RX_DESC_STATUS_TSYNINDX_SHIFT      = 5, /* 2 BITS */
        I40E_RX_DESC_STATUS_TSYNVALID_SHIFT     = 7,
        I40E_RX_DESC_STATUS_CRCP_SHIFT          = 4,
        I40E_RX_DESC_STATUS_TSYNINDX_SHIFT      = 5, /* 2 BITS */
        I40E_RX_DESC_STATUS_TSYNVALID_SHIFT     = 7,
-       I40E_RX_DESC_STATUS_RESERVED1_SHIFT     = 8,
+       I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT     = 8,
 
        I40E_RX_DESC_STATUS_UMBCAST_SHIFT       = 9, /* 2 BITS */
        I40E_RX_DESC_STATUS_FLM_SHIFT           = 11,
 
        I40E_RX_DESC_STATUS_UMBCAST_SHIFT       = 9, /* 2 BITS */
        I40E_RX_DESC_STATUS_FLM_SHIFT           = 11,
@@ -716,7 +761,7 @@ enum i40e_rx_desc_status_bits {
        I40E_RX_DESC_STATUS_LPBK_SHIFT          = 14,
        I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT     = 15,
        I40E_RX_DESC_STATUS_RESERVED2_SHIFT     = 16, /* 2 BITS */
        I40E_RX_DESC_STATUS_LPBK_SHIFT          = 14,
        I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT     = 15,
        I40E_RX_DESC_STATUS_RESERVED2_SHIFT     = 16, /* 2 BITS */
-       I40E_RX_DESC_STATUS_UDP_0_SHIFT         = 18,
+       I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT     = 18,
        I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
        I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
@@ -1094,6 +1139,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
 #define I40E_TXD_CTX_QW0_DECTTL_MASK   (0xFULL << \
                                         I40E_TXD_CTX_QW0_DECTTL_SHIFT)
 
 #define I40E_TXD_CTX_QW0_DECTTL_MASK   (0xFULL << \
                                         I40E_TXD_CTX_QW0_DECTTL_SHIFT)
 
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT  23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK   BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
 struct i40e_nop_desc {
        __le64 rsvd;
        __le64 dtype_cmd;
 struct i40e_nop_desc {
        __le64 rsvd;
        __le64 dtype_cmd;
@@ -1130,15 +1177,24 @@ struct i40e_filter_program_desc {
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-       /* Note: Values 0-30 are reserved for future use */
+       /* Note: Values 0-28 are reserved for future use.
+        * Value 29, 30, 32 are not supported on XL710 and X710.
+        */
+       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP        = 29,
+       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP      = 30,
        I40E_FILTER_PCTYPE_NONF_IPV4_UDP                = 31,
        I40E_FILTER_PCTYPE_NONF_IPV4_UDP                = 31,
-       /* Note: Value 32 is reserved for future use */
+       I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK     = 32,
        I40E_FILTER_PCTYPE_NONF_IPV4_TCP                = 33,
        I40E_FILTER_PCTYPE_NONF_IPV4_SCTP               = 34,
        I40E_FILTER_PCTYPE_NONF_IPV4_OTHER              = 35,
        I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
        I40E_FILTER_PCTYPE_NONF_IPV4_TCP                = 33,
        I40E_FILTER_PCTYPE_NONF_IPV4_SCTP               = 34,
        I40E_FILTER_PCTYPE_NONF_IPV4_OTHER              = 35,
        I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
-       /* Note: Values 37-40 are reserved for future use */
+       /* Note: Values 37-38 are reserved for future use.
+        * Value 39, 40, 42 are not supported on XL710 and X710.
+        */
+       I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP        = 39,
+       I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP      = 40,
        I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
        I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
+       I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK     = 42,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
        I40E_FILTER_PCTYPE_NONF_IPV6_SCTP               = 44,
        I40E_FILTER_PCTYPE_NONF_IPV6_OTHER              = 45,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
        I40E_FILTER_PCTYPE_NONF_IPV6_SCTP               = 44,
        I40E_FILTER_PCTYPE_NONF_IPV6_OTHER              = 45,
@@ -1194,6 +1250,10 @@ enum i40e_filter_program_desc_pcmd {
 #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
                                          I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
                                          I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
 
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT    (0xEULL + \
+                                        I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK     BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
 #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK        (0x1FFUL << \
                                         I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
 #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK        (0x1FFUL << \
                                         I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
@@ -1727,4 +1787,37 @@ struct i40e_lldp_variables {
 
 /* RSS Hash Table Size */
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_512        0x00010000
 
 /* RSS Hash Table Size */
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_512        0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT              47
+#define I40E_L3_SRC_MASK               (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT           43
+#define I40E_L3_V6_SRC_MASK            (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT              35
+#define I40E_L3_DST_MASK               (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT           35
+#define I40E_L3_V6_DST_MASK            (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT              34
+#define I40E_L4_SRC_MASK               (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT              33
+#define I40E_L4_DST_MASK               (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT          31
+#define I40E_VERIFY_TAG_MASK           (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT             13
+#define I40E_FLEX_50_MASK              (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT             12
+#define I40E_FLEX_51_MASK              (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT             11
+#define I40E_FLEX_52_MASK              (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT             10
+#define I40E_FLEX_53_MASK              (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT             9
+#define I40E_FLEX_54_MASK              (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT             8
+#define I40E_FLEX_55_MASK              (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT             7
+#define I40E_FLEX_56_MASK              (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT             6
+#define I40E_FLEX_57_MASK              (0x1ULL << I40E_FLEX_57_SHIFT)
 #endif /* _I40E_TYPE_H_ */
 #endif /* _I40E_TYPE_H_ */
similarity index 88%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_virtchnl.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_virtchnl.h
index ef78a917d253c2a660c4400b93a44b54f9d23494..7a900010bd2673a21f7f2bfaa8103a5efb1ffdd9 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -80,7 +77,12 @@ enum i40e_virtchnl_ops {
        I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
        I40E_VIRTCHNL_OP_GET_STATS = 15,
        I40E_VIRTCHNL_OP_FCOE = 16,
        I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
        I40E_VIRTCHNL_OP_GET_STATS = 15,
        I40E_VIRTCHNL_OP_FCOE = 16,
-       I40E_VIRTCHNL_OP_EVENT = 17,
+       I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+       I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+       I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+       I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+       I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
@@ -153,6 +155,8 @@ struct i40e_virtchnl_vsi_resource {
 #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR     0x00000020
 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING    0x00020000
 #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR     0x00000020
 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING    0x00020000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF                0X00080000
 
 struct i40e_virtchnl_vf_resource {
        u16 num_vsis;
 
 struct i40e_virtchnl_vf_resource {
        u16 num_vsis;
@@ -161,8 +165,8 @@ struct i40e_virtchnl_vf_resource {
        u16 max_mtu;
 
        u32 vf_offload_flags;
        u16 max_mtu;
 
        u32 vf_offload_flags;
-       u32 max_fcoe_contexts;
-       u32 max_fcoe_filters;
+       u32 rss_key_size;
+       u32 rss_lut_size;
 
        struct i40e_virtchnl_vsi_resource vsi_res[1];
 };
 
        struct i40e_virtchnl_vsi_resource vsi_res[1];
 };
@@ -321,6 +325,39 @@ struct i40e_virtchnl_promisc_info {
  * PF replies with struct i40e_eth_stats in an external buffer.
  */
 
  * PF replies with struct i40e_eth_stats in an external buffer.
  */
 
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the rss fields in
+ * the vf resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+       u16 vsi_id;
+       u16 key_len;
+       u8 key[1];         /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+       u16 vsi_id;
+       u16 lut_entries;
+       u8 lut[1];        /* RSS lookup table*/
+};
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+       u64 hena;
+};
+
 /* I40E_VIRTCHNL_OP_EVENT
  * PF sends this message to inform the VF driver of events that may affect it.
  * No direct response is expected from the VF, though it may generate other
 /* I40E_VIRTCHNL_OP_EVENT
  * PF sends this message to inform the VF driver of events that may affect it.
  * No direct response is expected from the VF, though it may generate other
similarity index 79%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.c
rename to i40e-dkms/i40e-1.5.18/src/i40e_virtchnl_pf.c
index 3af1ad9c4418b72a7ad5e4d0c644d36fa8c1ad4c..c53a2f490120ea111f0c9ae313c430cb72b712e6 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -48,7 +45,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
        int i;
 
        for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
        int i;
 
        for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
-               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+               int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
                /* Not all vfs are enabled so skip the ones that are not */
                if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
                    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
                /* Not all vfs are enabled so skip the ones that are not */
                if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
                    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
@@ -63,7 +60,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
 }
 
 /**
 }
 
 /**
- * i40e_vc_notify_link_state
+ * i40e_vc_notify_vf_link_state
  * @vf: pointer to the VF structure
  *
  * send a link status message to a single VF
  * @vf: pointer to the VF structure
  *
  * send a link status message to a single VF
@@ -74,7 +71,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_link_status *ls = &pf->hw.phy.link_info;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_link_status *ls = &pf->hw.phy.link_info;
-       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+       int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
 
        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
 
        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
@@ -145,7 +142,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
            !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
                return;
 
            !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
                return;
 
-       abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+       abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
 
        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
 
        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
@@ -293,8 +290,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
        next_q = find_first_bit(&linklistmap,
                                (I40E_MAX_VSI_QP *
                                 I40E_VIRTCHNL_SUPPORTED_QTYPES));
        next_q = find_first_bit(&linklistmap,
                                (I40E_MAX_VSI_QP *
                                 I40E_VIRTCHNL_SUPPORTED_QTYPES));
-       vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
-       qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
+       vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+       qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
        reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 
        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
        reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 
@@ -464,7 +461,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
                rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 
                /* set splitalways mode 10b */
                rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 
                /* set splitalways mode 10b */
-               rx_ctx.dtype = 0x2;
+               rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
        }
 
        /* databuffer length validation */
        }
 
        /* databuffer length validation */
@@ -552,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 
                spin_lock_bh(&vsi->mac_filter_list_lock);
                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 
                spin_lock_bh(&vsi->mac_filter_list_lock);
-               f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
-                                   true, false);
-               if (!f)
-                       dev_info(&pf->pdev->dev,
-                                "Could not allocate VF MAC addr\n");
+               if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
+                       f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+                                      vf->port_vlan_id ? vf->port_vlan_id : -1,
+                                      true, false);
+                       if (!f)
+                               dev_info(&pf->pdev->dev,
+                                       "Could not add MAC filter %pM for VF %d\n",
+                                       vf->default_lan_addr.addr, vf->vf_id);
+               }
                f = i40e_add_filter(vsi, brdcast,
                                    vf->port_vlan_id ? vf->port_vlan_id : -1,
                                    true, false);
                f = i40e_add_filter(vsi, brdcast,
                                    vf->port_vlan_id ? vf->port_vlan_id : -1,
                                    true, false);
@@ -568,7 +568,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
        }
 
        /* program mac filter */
        }
 
        /* program mac filter */
-       ret = i40e_sync_vsi_filters(vsi, false);
+       ret = i40e_sync_vsi_filters(vsi);
        if (ret)
                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 
        if (ret)
                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 
@@ -602,8 +602,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
         * that VF queues be mapped using this method, even when they are
         * contiguous in real life
         */
         * that VF queues be mapped using this method, even when they are
         * contiguous in real life
         */
-       wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
-            I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+       i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
+                         I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
 
        /* enable VF vplan_qtable mappings */
        reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
 
        /* enable VF vplan_qtable mappings */
        reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
@@ -630,7 +630,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
                                                      (j * 2) + 1);
                        reg |= qid << 16;
                }
                                                      (j * 2) + 1);
                        reg |= qid << 16;
                }
-               wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
+               i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
+                                 reg);
        }
 
        i40e_flush(hw);
        }
 
        i40e_flush(hw);
@@ -674,6 +675,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
                i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
                vf->lan_vsi_idx = 0;
                vf->lan_vsi_id = 0;
                i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
                vf->lan_vsi_idx = 0;
                vf->lan_vsi_id = 0;
+               vf->num_mac = 0;
        }
        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 
        }
        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 
@@ -729,7 +731,11 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
        if (ret)
                goto error_alloc;
        total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
        if (ret)
                goto error_alloc;
        total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
-       set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+       if (vf->trusted)
+               set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+       else
+               clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 
        /* store the total qps number for the runtime
         * VF req validation
 
        /* store the total qps number for the runtime
         * VF req validation
@@ -786,9 +792,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 {
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
 {
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
+       u32 reg, reg_idx, bit_idx;
        bool rsd = false;
        int i;
        bool rsd = false;
        int i;
-       u32 reg;
 
        if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
                return;
 
        if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
                return;
@@ -804,8 +810,12 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
                reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
                reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
                wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
                reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
                reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
                wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
-               i40e_flush(hw);
        }
        }
+       /* clear the VFLR bit in GLGEN_VFLRSTAT */
+       reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+       bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+       i40e_flush(hw);
 
        if (i40e_quiesce_vf_pci(vf))
                dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
 
        if (i40e_quiesce_vf_pci(vf))
                dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
@@ -856,6 +866,7 @@ complete_reset:
        }
        /* tell the VF the reset is done */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
        }
        /* tell the VF the reset is done */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+
        i40e_flush(hw);
        clear_bit(__I40E_VF_DISABLE, &pf->state);
 }
        i40e_flush(hw);
        clear_bit(__I40E_VF_DISABLE, &pf->state);
 }
@@ -964,9 +975,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 
                /* assign default capabilities */
                set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
 
                /* assign default capabilities */
                set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
                vfs[i].spoofchk = true;
                vfs[i].spoofchk = true;
-#endif
                /* VF resources get allocated during reset */
                i40e_reset_vf(&vfs[i], false);
 
                /* VF resources get allocated during reset */
                i40e_reset_vf(&vfs[i], false);
 
@@ -978,7 +987,7 @@ err_alloc:
                i40e_free_vfs(pf);
 err_iov:
        /* Re-enable interrupt 0. */
                i40e_free_vfs(pf);
 err_iov:
        /* Re-enable interrupt 0. */
-       i40e_irq_dynamic_enable_icr0(pf);
+       i40e_irq_dynamic_enable_icr0(pf, false);
        return ret;
 }
 
        return ret;
 }
 
@@ -1097,8 +1106,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
        /* single place to detect unsuccessful return values */
        if (v_retval) {
                vf->num_invalid_msgs++;
        /* single place to detect unsuccessful return values */
        if (v_retval) {
                vf->num_invalid_msgs++;
-               dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
-                       v_opcode, v_retval);
+               dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+                        vf->vf_id, v_opcode, v_retval);
                if (vf->num_invalid_msgs >
                    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
                        dev_err(&pf->pdev->dev,
                if (vf->num_invalid_msgs >
                    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
                        dev_err(&pf->pdev->dev,
@@ -1113,12 +1122,12 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
                vf->num_invalid_msgs = 0;
        }
 
                vf->num_invalid_msgs = 0;
        }
 
-       aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,  v_opcode, v_retval,
+       aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
                                        msg, msglen, NULL);
        if (aq_ret) {
                                        msg, msglen, NULL);
        if (aq_ret) {
-               dev_err(&pf->pdev->dev,
-                       "Unable to send the message to VF %d aq_err %d\n",
-                       vf->vf_id, pf->hw.aq.asq_last_status);
+               dev_info(&pf->pdev->dev,
+                        "Unable to send the message to VF %d aq_err %d\n",
+                        vf->vf_id, pf->hw.aq.asq_last_status);
                return -EIO;
        }
 
                return -EIO;
        }
 
@@ -1176,8 +1185,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
        struct i40e_vsi *vsi;
        struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
        struct i40e_vsi *vsi;
-       int i = 0, len = 0;
        int num_vsis = 1;
        int num_vsis = 1;
+       int len = 0;
        int ret;
 
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
        int ret;
 
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
@@ -1205,24 +1214,56 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi->info.pvid)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi->info.pvid)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
-               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
 
 
-       if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+       if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;
+       } else {
+               if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+                   (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+                       vfres->vf_offload_flags |=
+                                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+               else
+                       vfres->vf_offload_flags |=
+                                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+       }
+       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+               if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                       vfres->vf_offload_flags |=
+                               I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+       }
+
+       if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+               if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+                       dev_err(&pf->pdev->dev,
+                               "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
+                                vf->vf_id);
+                       ret = I40E_ERR_PARAM;
+                       goto err;
+               }
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+       }
+
+       if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
+               if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+                       vfres->vf_offload_flags |=
+                                       I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+       }
 
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
 
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+       vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
+       vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+
        if (vf->lan_vsi_idx) {
        if (vf->lan_vsi_idx) {
-               vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
-               vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
-               vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+               vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
+               vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+               vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
                /* VFs only use TC 0 */
                /* VFs only use TC 0 */
-               vfres->vsi_res[i].qset_handle
+               vfres->vsi_res[0].qset_handle
                                          = LE16_TO_CPU(vsi->info.qs_handle[0]);
                                          = LE16_TO_CPU(vsi->info.qs_handle[0]);
-               ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+               ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
                                vf->default_lan_addr.addr);
                                vf->default_lan_addr.addr);
-               i++;
        }
        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 
        }
        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 
@@ -1251,6 +1292,25 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
                i40e_reset_vf(vf, false);
 }
 
                i40e_reset_vf(vf, false);
 }
 
+/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of vlans offloaded on this vf
+ **/
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+       struct i40e_mac_filter *f;
+       int num_vlans = 0;
+
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+                       num_vlans++;
+       }
+
+       return num_vlans;
+}
+
 /**
  * i40e_vc_config_promiscuous_mode_msg
  * @vf: pointer to the VF info
 /**
  * i40e_vc_config_promiscuous_mode_msg
  * @vf: pointer to the VF info
@@ -1265,24 +1325,123 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
 {
        struct i40e_virtchnl_promisc_info *info =
            (struct i40e_virtchnl_promisc_info *)msg;
 {
        struct i40e_virtchnl_promisc_info *info =
            (struct i40e_virtchnl_promisc_info *)msg;
+       i40e_status aq_ret = I40E_SUCCESS;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
+       struct i40e_mac_filter *f;
        bool allmulti = false;
        bool allmulti = false;
+       bool alluni = false;
+       int aq_err = 0;
        struct i40e_vsi *vsi;
        struct i40e_vsi *vsi;
-       i40e_status aq_ret;
 
        vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
 
        vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
-           !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
-           (vsi->type != I40E_VSI_FCOE)) {
+           !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+               dev_err(&pf->pdev->dev,
+                       "VF %d doesn't meet requirements to enter promiscuous mode\n",
+                       vf->vf_id);
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
+       /* Multicast promiscuous handling*/
        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
                allmulti = true;
        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
                allmulti = true;
-       aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
-                                                      allmulti, NULL);
+       if (vf->port_vlan_id) {
+               aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
+                                                           allmulti,
+                                                           vf->port_vlan_id,
+                                                           NULL);
+       } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+               list_for_each_entry(f, &vsi->mac_filter_list, list) {
+                       if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+                               continue;
+                       aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
+                                                                   vsi->seid,
+                                                                   allmulti,
+                                                                   f->vlan,
+                                                                   NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
+                       if (aq_ret) {
+                               dev_err(&pf->pdev->dev,
+                                       "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
+                                       f->vlan,
+                                       i40e_stat_str(&pf->hw, aq_ret),
+                                       i40e_aq_str(&pf->hw, aq_err));
+                               break;
+                       }
+               }
+       } else {
+               aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+                                                              allmulti, NULL);
+               aq_err = pf->hw.aq.asq_last_status;
+               if (aq_ret) {
+                       dev_err(&pf->pdev->dev,
+                               "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+                               vf->vf_id,
+                               i40e_stat_str(&pf->hw, aq_ret),
+                               i40e_aq_str(&pf->hw, aq_err));
+                       goto error_param_int;
+               }
+       }
+
+       if (!aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "VF %d successfully set multicast promiscuous mode\n",
+                        vf->vf_id);
+               if (allmulti)
+                       set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+               else
+                       clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+       }
+
+       if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+               alluni = true;
+       if (vf->port_vlan_id) {
+               aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
+                                                           alluni,
+                                                           vf->port_vlan_id,
+                                                           NULL);
+       } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+               list_for_each_entry(f, &vsi->mac_filter_list, list) {
+                       if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+                               continue;
+                       aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
+                                                                   vsi->seid,
+                                                                   alluni,
+                                                                   f->vlan,
+                                                                   NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
+                       if (aq_ret)
+                               dev_err(&pf->pdev->dev,
+                                       "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
+                                       f->vlan,
+                                       i40e_stat_str(&pf->hw, aq_ret),
+                                       i40e_aq_str(&pf->hw, aq_err));
+               }
+       } else {
+               aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+                                                            allmulti, NULL,
+                                                            true);
+               aq_err = pf->hw.aq.asq_last_status;
+               if (aq_ret)
+                       dev_err(&pf->pdev->dev,
+                               "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
+                               vf->vf_id, info->flags,
+                               i40e_stat_str(&pf->hw, aq_ret),
+                               i40e_aq_str(&pf->hw, aq_err));
+       }
+
+error_param_int:
+       if (!aq_ret) {
+               dev_info(&pf->pdev->dev,
+                        "VF %d successfully set unicast promiscuous mode\n",
+                        vf->vf_id);
+               if (alluni)
+                       set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+               else
+                       clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+       }
 
 error_param:
        /* send the response to the VF */
 
 error_param:
        /* send the response to the VF */
@@ -1534,6 +1693,10 @@ error_param:
                                      (u8 *)&stats, sizeof(stats));
 }
 
                                      (u8 *)&stats, sizeof(stats));
 }
 
+/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_VLAN_PER_VF 8
+
 /**
  * i40e_check_vf_permission
  * @vf: pointer to the VF info
 /**
  * i40e_check_vf_permission
  * @vf: pointer to the VF info
@@ -1554,15 +1717,22 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
                dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
                ret = I40E_ERR_INVALID_MAC_ADDR;
        } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
                dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
                ret = I40E_ERR_INVALID_MAC_ADDR;
        } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+                  !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
                   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
                /* If the host VMM administrator has set the VF MAC address
                 * administratively via the ndo_set_vf_mac command then deny
                 * permission to the VF to add or delete unicast MAC addresses.
                   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
                /* If the host VMM administrator has set the VF MAC address
                 * administratively via the ndo_set_vf_mac command then deny
                 * permission to the VF to add or delete unicast MAC addresses.
+                * Unless the VF is privileged and then it can do whatever.
                 * The VF may request to set the MAC address filter already
                 * assigned to it so do not return an error in that case.
                 */
                dev_err(&pf->pdev->dev,
                 * The VF may request to set the MAC address filter already
                 * assigned to it so do not return an error in that case.
                 */
                dev_err(&pf->pdev->dev,
-                       "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+                       "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
+               ret = -EPERM;
+       } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
+                  !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+               dev_err(&pf->pdev->dev,
+                       "VF is not trusted, switch the VF to trusted to add more functionality\n");
                ret = -EPERM;
        }
        return ret;
                ret = -EPERM;
        }
        return ret;
@@ -1587,7 +1757,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        int i;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
        int i;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1621,16 +1790,19 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 
                if (!f) {
                        dev_err(&pf->pdev->dev,
 
                if (!f) {
                        dev_err(&pf->pdev->dev,
-                               "Unable to add VF MAC filter\n");
+                               "Unable to add MAC filter %pM for VF %d\n",
+                                al->list[i].addr, vf->vf_id);
                        ret = I40E_ERR_PARAM;
                        spin_unlock_bh(&vsi->mac_filter_list_lock);
                        goto error_param;
                        ret = I40E_ERR_PARAM;
                        spin_unlock_bh(&vsi->mac_filter_list_lock);
                        goto error_param;
+               } else {
+                       vf->num_mac++;
                }
        }
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* program the updated filter list */
                }
        }
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* program the updated filter list */
-       ret = i40e_sync_vsi_filters(vsi, false);
+       ret = i40e_sync_vsi_filters(vsi);
        if (ret)
                dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
                        vf->vf_id, ret);
        if (ret)
                dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
                        vf->vf_id, ret);
@@ -1660,7 +1832,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        int i;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
        int i;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1669,8 +1840,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        for (i = 0; i < al->num_elements; i++) {
                if (is_broadcast_ether_addr(al->list[i].addr) ||
                    is_zero_ether_addr(al->list[i].addr)) {
        for (i = 0; i < al->num_elements; i++) {
                if (is_broadcast_ether_addr(al->list[i].addr) ||
                    is_zero_ether_addr(al->list[i].addr)) {
-                       dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
-                               al->list[i].addr);
+                       dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
+                               al->list[i].addr, vf->vf_id);
                        ret = I40E_ERR_INVALID_MAC_ADDR;
                        goto error_param;
                }
                        ret = I40E_ERR_INVALID_MAC_ADDR;
                        goto error_param;
                }
@@ -1680,12 +1851,18 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        spin_lock_bh(&vsi->mac_filter_list_lock);
        /* delete addresses from the list */
        for (i = 0; i < al->num_elements; i++)
        spin_lock_bh(&vsi->mac_filter_list_lock);
        /* delete addresses from the list */
        for (i = 0; i < al->num_elements; i++)
-               i40e_del_filter(vsi, al->list[i].addr,
-                               I40E_VLAN_ANY, true, false);
+               if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
+                       ret = I40E_ERR_INVALID_MAC_ADDR;
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       goto error_param;
+               } else {
+                       vf->num_mac--;
+               }
+
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* program the updated filter list */
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* program the updated filter list */
-       ret = i40e_sync_vsi_filters(vsi, false);
+       ret = i40e_sync_vsi_filters(vsi);
        if (ret)
                dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
                        vf->vf_id, ret);
        if (ret)
                dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
                        vf->vf_id, ret);
@@ -1714,8 +1891,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        i40e_status aq_ret = 0;
        int i;
 
        i40e_status aq_ret = 0;
        int i;
 
+       if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+               dev_err(&pf->pdev->dev,
+                       "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
+               goto error_param;
+       }
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1739,11 +1921,24 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        for (i = 0; i < vfl->num_elements; i++) {
                /* add new VLAN filter */
                int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
        for (i = 0; i < vfl->num_elements; i++) {
                /* add new VLAN filter */
                int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+               if (!ret)
+                       vf->num_vlan++;
+
+               if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+                       i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+                                                          true,
+                                                          vfl->vlan_id[i],
+                                                          NULL);
+               if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+                       i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+                                                          true,
+                                                          vfl->vlan_id[i],
+                                                          NULL);
 
                if (ret)
                        dev_err(&pf->pdev->dev,
 
                if (ret)
                        dev_err(&pf->pdev->dev,
-                               "Unable to add VF vlan filter %d, error %d\n",
-                               vfl->vlan_id[i], ret);
+                               "Unable to add VLAN filter %d for VF %d, error %d\n",
+                               vfl->vlan_id[i], vf->vf_id, ret);
        }
 
 error_param:
        }
 
 error_param:
@@ -1770,7 +1965,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        int i;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
        int i;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1791,11 +1985,24 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 
        for (i = 0; i < vfl->num_elements; i++) {
                int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
 
        for (i = 0; i < vfl->num_elements; i++) {
                int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+               if (!ret)
+                       vf->num_vlan--;
+
+               if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+                       i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+                                                          false,
+                                                          vfl->vlan_id[i],
+                                                          NULL);
+               if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+                       i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+                                                          false,
+                                                          vfl->vlan_id[i],
+                                                          NULL);
 
                if (ret)
                        dev_err(&pf->pdev->dev,
 
                if (ret)
                        dev_err(&pf->pdev->dev,
-                               "Unable to delete VF vlan filter %d, error %d\n",
-                               vfl->vlan_id[i], ret);
+                               "Unable to delete VLAN filter %d for VF %d, error %d\n",
+                               vfl->vlan_id[i], vf->vf_id, ret);
        }
 
 error_param:
        }
 
 error_param:
@@ -1803,6 +2010,135 @@ error_param:
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
 }
 
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
 }
 
+/**
+ * i40e_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS key
+ **/
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_rss_key *vrk =
+               (struct i40e_virtchnl_rss_key *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = NULL;
+       u16 vsi_id = vrk->vsi_id;
+       i40e_status aq_ret = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+           (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto err;
+       }
+
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
+err:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+                                      aq_ret);
+}
+
+/**
+ * i40e_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS LUT
+ **/
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_rss_lut *vrl =
+               (struct i40e_virtchnl_rss_lut *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = NULL;
+       u16 vsi_id = vrl->vsi_id;
+       i40e_status aq_ret = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+           (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto err;
+       }
+
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
+       /* send the response to the VF */
+err:
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+                                      aq_ret);
+}
+
+/**
+ * i40e_vc_get_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Return the RSS HENA bits allowed by the hardware
+ **/
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_rss_hena *vrh = NULL;
+       struct i40e_pf *pf = vf->pf;
+       i40e_status aq_ret = 0;
+       int len = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto err;
+       }
+       len = sizeof(struct i40e_virtchnl_rss_hena);
+
+       vrh = kzalloc(len, GFP_KERNEL);
+       if (!vrh) {
+               aq_ret = I40E_ERR_NO_MEMORY;
+               len = 0;
+               goto err;
+       }
+       vrh->hena = i40e_pf_get_default_rss_hena(pf);
+err:
+       /* send the response back to the VF */
+       aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+                                       aq_ret, (u8 *)vrh, len);
+       return aq_ret;
+}
+
+/**
+ * i40e_vc_set_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Set the RSS HENA bits for the VF
+ **/
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_rss_hena *vrh =
+               (struct i40e_virtchnl_rss_hena *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       i40e_status aq_ret = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto err;
+       }
+       i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+       i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
+                         (u32)(vrh->hena >> 32));
+
+       /* send the response to the VF */
+err:
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+                                      aq_ret);
+}
+
 /**
  * i40e_vc_validate_vf_msg
  * @vf: pointer to the VF info
 /**
  * i40e_vc_validate_vf_msg
  * @vf: pointer to the VF info
@@ -1816,7 +2152,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
                                   u32 v_retval, u8 *msg, u16 msglen)
 {
        bool err_msg_format = false;
                                   u32 v_retval, u8 *msg, u16 msglen)
 {
        bool err_msg_format = false;
-       int valid_len;
+       int valid_len = 0;
 
        /* Check if VF is disabled. */
        if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
 
        /* Check if VF is disabled. */
        if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
@@ -1828,13 +2164,10 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
                valid_len = sizeof(struct i40e_virtchnl_version_info);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
                valid_len = sizeof(struct i40e_virtchnl_version_info);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
-               valid_len = 0;
                break;
        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
                if (VF_IS_V11(vf))
                        valid_len = sizeof(u32);
                break;
        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
                if (VF_IS_V11(vf))
                        valid_len = sizeof(u32);
-               else
-                       valid_len = 0;
                break;
        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
                valid_len = sizeof(struct i40e_virtchnl_txq_info);
                break;
        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
                valid_len = sizeof(struct i40e_virtchnl_txq_info);
@@ -1898,6 +2231,35 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
        case I40E_VIRTCHNL_OP_GET_STATS:
                valid_len = sizeof(struct i40e_virtchnl_queue_select);
                break;
        case I40E_VIRTCHNL_OP_GET_STATS:
                valid_len = sizeof(struct i40e_virtchnl_queue_select);
                break;
+       case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+               valid_len = sizeof(struct i40e_virtchnl_rss_key);
+               if (msglen >= valid_len) {
+                       struct i40e_virtchnl_rss_key *vrk =
+                               (struct i40e_virtchnl_rss_key *)msg;
+                       if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
+                               err_msg_format = true;
+                               break;
+                       }
+                       valid_len += vrk->key_len - 1;
+               }
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+               valid_len = sizeof(struct i40e_virtchnl_rss_lut);
+               if (msglen >= valid_len) {
+                       struct i40e_virtchnl_rss_lut *vrl =
+                               (struct i40e_virtchnl_rss_lut *)msg;
+                       if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
+                               err_msg_format = true;
+                               break;
+                       }
+                       valid_len += vrl->lut_entries - 1;
+               }
+               break;
+       case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+               break;
+       case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+               valid_len = sizeof(struct i40e_virtchnl_rss_hena);
+               break;
        /* These are always errors coming from the VF. */
        case I40E_VIRTCHNL_OP_EVENT:
        case I40E_VIRTCHNL_OP_UNKNOWN:
        /* These are always errors coming from the VF. */
        case I40E_VIRTCHNL_OP_EVENT:
        case I40E_VIRTCHNL_OP_UNKNOWN:
@@ -1924,11 +2286,11 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
  * called from the common aeq/arq handler to
  * process request from VF
  **/
  * called from the common aeq/arq handler to
  * process request from VF
  **/
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
                           u32 v_retval, u8 *msg, u16 msglen)
 {
        struct i40e_hw *hw = &pf->hw;
                           u32 v_retval, u8 *msg, u16 msglen)
 {
        struct i40e_hw *hw = &pf->hw;
-       unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+       int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
        struct i40e_vf *vf;
        int ret;
 
        struct i40e_vf *vf;
        int ret;
 
@@ -1987,6 +2349,19 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
        case I40E_VIRTCHNL_OP_GET_STATS:
                ret = i40e_vc_get_stats_msg(vf, msg, msglen);
                break;
        case I40E_VIRTCHNL_OP_GET_STATS:
                ret = i40e_vc_get_stats_msg(vf, msg, msglen);
                break;
+       case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+               ret = i40e_vc_config_rss_key(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+               ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+               ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+               ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+               break;
+
        case I40E_VIRTCHNL_OP_UNKNOWN:
        default:
                dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
        case I40E_VIRTCHNL_OP_UNKNOWN:
        default:
                dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -2008,14 +2383,19 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
  **/
 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 {
  **/
 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 {
-       u32 reg, reg_idx, bit_idx, vf_id;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hw *hw = &pf->hw;
+       u32 reg, reg_idx, bit_idx;
        struct i40e_vf *vf;
        struct i40e_vf *vf;
+       int vf_id;
 
        if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
                return 0;
 
 
        if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
                return 0;
 
-       /* re-enable vflr interrupt cause */
+       /* Re-enable the VFLR interrupt cause here, before looking for which
+        * VF got reset. Otherwise, if another VF gets a reset while the
+        * first one is being processed, that interrupt will be lost, and
+        * that VF will be stuck in reset forever.
+        */
        reg = rd32(hw, I40E_PFINT_ICR0_ENA);
        reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
        wr32(hw, I40E_PFINT_ICR0_ENA, reg);
        reg = rd32(hw, I40E_PFINT_ICR0_ENA);
        reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
        wr32(hw, I40E_PFINT_ICR0_ENA, reg);
@@ -2028,13 +2408,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
                /* read GLGEN_VFLRSTAT register to find out the flr vfs */
                vf = &pf->vf[vf_id];
                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
                /* read GLGEN_VFLRSTAT register to find out the flr vfs */
                vf = &pf->vf[vf_id];
                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
-               if (reg & BIT(bit_idx)) {
-                       /* clear the bit in GLGEN_VFLRSTAT */
-                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-
-                       if (!test_bit(__I40E_DOWN, &pf->state))
-                               i40e_reset_vf(vf, true);
-               }
+               if (reg & BIT(bit_idx))
+                       /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
+                       i40e_reset_vf(vf, true);
        }
 
        return 0;
        }
 
        return 0;
@@ -2070,15 +2446,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        vf = &(pf->vf[vf_id]);
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
        vf = &(pf->vf[vf_id]);
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
-               dev_err(&pf->pdev->dev,
-                       "Uninitialized VF %d\n", vf_id);
-               ret = -EINVAL;
+               dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+                       vf_id);
+               ret = -EAGAIN;
                goto error_param;
        }
 
                goto error_param;
        }
 
-       if (!is_valid_ether_addr(mac)) {
+       if (is_multicast_ether_addr(mac)) {
                dev_err(&pf->pdev->dev,
                dev_err(&pf->pdev->dev,
-                       "Invalid VF ethernet address\n");
+                       "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
                ret = -EINVAL;
                goto error_param;
        }
                ret = -EINVAL;
                goto error_param;
        }
@@ -2089,9 +2465,10 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        spin_lock_bh(&vsi->mac_filter_list_lock);
 
        /* delete the temporary mac address */
        spin_lock_bh(&vsi->mac_filter_list_lock);
 
        /* delete the temporary mac address */
-       i40e_del_filter(vsi, vf->default_lan_addr.addr,
-                       vf->port_vlan_id ? vf->port_vlan_id : -1,
-                       true, false);
+       if (!is_zero_ether_addr(vf->default_lan_addr.addr))
+               i40e_del_filter(vsi, vf->default_lan_addr.addr,
+                               vf->port_vlan_id ? vf->port_vlan_id : -1,
+                               true, false);
 
        /* Delete all the filters for this VSI - we're going to kill it
         * anyway.
 
        /* Delete all the filters for this VSI - we're going to kill it
         * anyway.
@@ -2103,7 +2480,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 
        dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
        /* program mac filter */
 
        dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
        /* program mac filter */
-       if (i40e_sync_vsi_filters(vsi, false)) {
+       if (i40e_sync_vsi_filters(vsi)) {
                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
                ret = -EIO;
                goto error_param;
                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
                ret = -EIO;
                goto error_param;
@@ -2154,8 +2531,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
        vf = &(pf->vf[vf_id]);
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
        vf = &(pf->vf[vf_id]);
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
-               dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
-               ret = -EINVAL;
+               dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+                       vf_id);
+               ret = -EAGAIN;
                goto error_pvid;
        }
 
                goto error_pvid;
        }
 
@@ -2176,6 +2554,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                 * and then reloading the VF driver.
                 */
                i40e_vc_disable_vf(pf, vf);
                 * and then reloading the VF driver.
                 */
                i40e_vc_disable_vf(pf, vf);
+               /* During reset the VF got a new VSI, so refresh the pointer. */
+               vsi = pf->vsi[vf->lan_vsi_idx];
        }
 
        /* Check for condition where there was already a port VLAN ID
        }
 
        /* Check for condition where there was already a port VLAN ID
@@ -2272,8 +2652,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int max_tx_rate)
        vf = &(pf->vf[vf_id]);
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
        vf = &(pf->vf[vf_id]);
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
-               dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
-               ret = -EINVAL;
+               dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+                       vf_id);
+               ret = -EAGAIN;
                goto error;
        }
 
                goto error;
        }
 
@@ -2281,6 +2662,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int max_tx_rate)
        case I40E_LINK_SPEED_40GB:
                speed = 40000;
                break;
        case I40E_LINK_SPEED_40GB:
                speed = 40000;
                break;
+       case I40E_LINK_SPEED_20GB:
+               speed = 20000;
+               break;
        case I40E_LINK_SPEED_10GB:
                speed = 10000;
                break;
        case I40E_LINK_SPEED_10GB:
                speed = 10000;
                break;
@@ -2359,8 +2743,9 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
        /* first vsi is always the LAN vsi */
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
        /* first vsi is always the LAN vsi */
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
-               dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
-               ret = -EINVAL;
+               dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+                       vf_id);
+               ret = -EAGAIN;
                goto error_param;
        }
 
                goto error_param;
        }
 
@@ -2486,6 +2871,12 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
        }
 
        vf = &(pf->vf[vf_id]);
        }
 
        vf = &(pf->vf[vf_id]);
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+               dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+                       vf_id);
+               ret = -EAGAIN;
+               goto out;
+       }
 
        if (enable == vf->spoofchk)
                goto out;
 
        if (enable == vf->spoofchk)
                goto out;
@@ -2507,6 +2898,47 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
 out:
        return ret;
 }
 out:
        return ret;
 }
+
 #endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
 #endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
+#ifdef HAVE_NDO_SET_VF_TRUST
+/**
+ * i40e_ndo_set_vf_trust
+ * @netdev: network interface device structure of the pf
+ * @vf_id: VF identifier
+ * @setting: trust setting
+ *
+ * Enable or disable VF trust setting
+ **/
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vf *vf;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+               return -EINVAL;
+       }
+
+       if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+               dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
+               return -EINVAL;
+       }
+
+       vf = &pf->vf[vf_id];
 
 
+       if (setting == vf->trusted)
+               goto out;
+
+       vf->trusted = setting;
+       i40e_vc_notify_vf_reset(vf);
+       i40e_reset_vf(vf, false);
+       dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+                vf_id, setting ? "" : "un");
+out:
+       return ret;
+}
+#endif /* HAVE_NDO_SET_VF_TRUST */
 #endif /* IFLA_VF_MAX */
 #endif /* IFLA_VF_MAX */
similarity index 89%
rename from i40e-dkms/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.h
rename to i40e-dkms/i40e-1.5.18/src/i40e_virtchnl_pf.h
index 85a5131aae72d8e866e4333317e52fecf4b56101..fa261f062ae39694ec09286d2faa945d8079da5b 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -60,6 +57,8 @@ enum i40e_vf_states {
        I40E_VF_STAT_ACTIVE,
        I40E_VF_STAT_FCOEENA,
        I40E_VF_STAT_DISABLED,
        I40E_VF_STAT_ACTIVE,
        I40E_VF_STAT_FCOEENA,
        I40E_VF_STAT_DISABLED,
+       I40E_VF_STAT_MC_PROMISC,
+       I40E_VF_STAT_UC_PROMISC,
 };
 
 /* VF capabilities */
 };
 
 /* VF capabilities */
@@ -76,7 +75,7 @@ struct i40e_vf {
        struct i40e_pf *pf;
 
        /* VF id in the PF space */
        struct i40e_pf *pf;
 
        /* VF id in the PF space */
-       u16 vf_id;
+       s16 vf_id;
        /* all VF vsis connect to the same parent */
        enum i40e_switch_element_types parent_type;
        struct i40e_virtchnl_version_info vf_ver;
        /* all VF vsis connect to the same parent */
        enum i40e_switch_element_types parent_type;
        struct i40e_virtchnl_version_info vf_ver;
@@ -89,13 +88,14 @@ struct i40e_vf {
        struct i40e_virtchnl_ether_addr default_fcoe_addr;
        u16 port_vlan_id;
        bool pf_set_mac;        /* The VMM admin set the VF MAC address */
        struct i40e_virtchnl_ether_addr default_fcoe_addr;
        u16 port_vlan_id;
        bool pf_set_mac;        /* The VMM admin set the VF MAC address */
+       bool trusted;
 
        /* VSI indices - actual VSI pointers are maintained in the PF structure
         * When assigned, these will be non-zero, because VSI 0 is always
         * the main LAN VSI for the PF.
         */
 
        /* VSI indices - actual VSI pointers are maintained in the PF structure
         * When assigned, these will be non-zero, because VSI 0 is always
         * the main LAN VSI for the PF.
         */
-       u8 lan_vsi_idx;         /* index into PF struct */
-       u8 lan_vsi_id;          /* ID as used by firmware */
+       u16 lan_vsi_idx;        /* index into PF struct */
+       u16 lan_vsi_id;         /* ID as used by firmware */
 #ifdef I40E_FCOE
        u8 fcoe_vsi_index;
        u8 fcoe_vsi_id;
 #ifdef I40E_FCOE
        u8 fcoe_vsi_index;
        u8 fcoe_vsi_id;
@@ -114,9 +114,12 @@ struct i40e_vf {
        bool link_forced;
        bool link_up;           /* only valid if VF link is forced */
 #endif
        bool link_forced;
        bool link_up;           /* only valid if VF link is forced */
 #endif
-#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
        bool spoofchk;
        bool spoofchk;
-#endif
+       u16 num_mac;
+       u16 num_vlan;
+
+       /* RDMA Client */
+       struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
 };
 
 void i40e_free_vfs(struct i40e_pf *pf);
 };
 
 void i40e_free_vfs(struct i40e_pf *pf);
@@ -124,7 +127,7 @@ void i40e_free_vfs(struct i40e_pf *pf);
 int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
 #endif
 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
 int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
 #endif
 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
                           u32 v_retval, u8 *msg, u16 msglen);
 int i40e_vc_process_vflr_event(struct i40e_pf *pf);
 void i40e_reset_vf(struct i40e_vf *vf, bool flr);
                           u32 v_retval, u8 *msg, u16 msglen);
 int i40e_vc_process_vflr_event(struct i40e_pf *pf);
 void i40e_reset_vf(struct i40e_vf *vf, bool flr);
@@ -140,6 +143,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 #else
 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
 #endif
 #else
 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
 #endif
+#ifdef HAVE_NDO_SET_VF_TRUST
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
+#endif
 int i40e_ndo_enable_vf(struct net_device *netdev, int vf_id, bool enable);
 #ifdef IFLA_VF_MAX
 int i40e_ndo_get_vf_config(struct net_device *netdev,
 int i40e_ndo_enable_vf(struct net_device *netdev, int vf_id, bool enable);
 #ifdef IFLA_VF_MAX
 int i40e_ndo_get_vf_config(struct net_device *netdev,
similarity index 94%
rename from i40e-dkms/i40e-1.3.47/src/i40e/kcompat.c
rename to i40e-dkms/i40e-1.5.18/src/kcompat.c
index f51540d5a65d044c0b3ef358e921d4ccb318ca96..9459c1dc2ef4e357779189b5cc460455ccbcd0d1 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -1120,17 +1117,18 @@ int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev)
 #ifdef HAVE_TX_MQ
 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
 #ifdef HAVE_TX_MQ
 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
-void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 {
        unsigned int real_num = dev->real_num_tx_queues;
        struct Qdisc *qdisc;
        int i;
 
 {
        unsigned int real_num = dev->real_num_tx_queues;
        struct Qdisc *qdisc;
        int i;
 
-       if (unlikely(txq > dev->num_tx_queues))
-               ;
+       if (txq < 1 || txq > dev->num_tx_queues)
+               return -EINVAL;
+
        else if (txq > real_num)
                dev->real_num_tx_queues = txq;
        else if (txq > real_num)
                dev->real_num_tx_queues = txq;
-       else if ( txq < real_num) {
+       else if (txq < real_num) {
                dev->real_num_tx_queues = txq;
                for (i = txq; i < dev->num_tx_queues; i++) {
                        qdisc = netdev_get_tx_queue(dev, i)->qdisc;
                dev->real_num_tx_queues = txq;
                for (i = txq; i < dev->num_tx_queues; i++) {
                        qdisc = netdev_get_tx_queue(dev, i)->qdisc;
@@ -1141,6 +1139,8 @@ void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
                        }
                }
        }
                        }
                }
        }
+
+       return 0;
 }
 #endif /* CONFIG_NETDEVICES_MULTIQUEUE */
 #endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
 }
 #endif /* CONFIG_NETDEVICES_MULTIQUEUE */
 #endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
@@ -1450,6 +1450,112 @@ int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
 }
 #endif /* < 3.7.0 */
 
 }
 #endif /* < 3.7.0 */
 
+/******************************************************************************
+ * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright,
+ * inferred copyright from kernel
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+                      int target, unsigned short *fragoff, int *flags)
+{
+       unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
+       u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+       unsigned int len;
+       bool found;
+
+#define __KC_IP6_FH_F_FRAG     BIT(0)
+#define __KC_IP6_FH_F_AUTH     BIT(1)
+#define __KC_IP6_FH_F_SKIP_RH  BIT(2)
+
+       if (fragoff)
+               *fragoff = 0;
+
+       if (*offset) {
+               struct ipv6hdr _ip6, *ip6;
+
+               ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
+               if (!ip6 || (ip6->version != 6)) {
+                       printk(KERN_ERR "IPv6 header not found\n");
+                       return -EBADMSG;
+               }
+               start = *offset + sizeof(struct ipv6hdr);
+               nexthdr = ip6->nexthdr;
+       }
+       len = skb->len - start;
+
+       do {
+               struct ipv6_opt_hdr _hdr, *hp;
+               unsigned int hdrlen;
+               found = (nexthdr == target);
+
+               if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
+                       if (target < 0 || found)
+                               break;
+                       return -ENOENT;
+               }
+
+               hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
+               if (!hp)
+                       return -EBADMSG;
+
+               if (nexthdr == NEXTHDR_ROUTING) {
+                       struct ipv6_rt_hdr _rh, *rh;
+
+                       rh = skb_header_pointer(skb, start, sizeof(_rh),
+                                               &_rh);
+                       if (!rh)
+                               return -EBADMSG;
+
+                       if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) &&
+                           rh->segments_left == 0)
+                               found = false;
+               }
+
+               if (nexthdr == NEXTHDR_FRAGMENT) {
+                       unsigned short _frag_off;
+                       __be16 *fp;
+
+                       if (flags)      /* Indicate that this is a fragment */
+                               *flags |= __KC_IP6_FH_F_FRAG;
+                       fp = skb_header_pointer(skb,
+                                               start+offsetof(struct frag_hdr,
+                                                              frag_off),
+                                               sizeof(_frag_off),
+                                               &_frag_off);
+                       if (!fp)
+                               return -EBADMSG;
+
+                       _frag_off = ntohs(*fp) & ~0x7;
+                       if (_frag_off) {
+                               if (target < 0 &&
+                                   ((!ipv6_ext_hdr(hp->nexthdr)) ||
+                                    hp->nexthdr == NEXTHDR_NONE)) {
+                                       if (fragoff)
+                                               *fragoff = _frag_off;
+                                       return hp->nexthdr;
+                               }
+                               return -ENOENT;
+                       }
+                       hdrlen = 8;
+               } else if (nexthdr == NEXTHDR_AUTH) {
+                       if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0))
+                               break;
+                       hdrlen = (hp->hdrlen + 2) << 2;
+               } else
+                       hdrlen = ipv6_optlen(hp);
+
+               if (!found) {
+                       nexthdr = hp->nexthdr;
+                       len -= hdrlen;
+                       start += hdrlen;
+               }
+       } while (!found);
+
+       *offset = start;
+       return nexthdr;
+}
+#endif /* < 3.8.0 */
+
 /******************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
 #ifdef CONFIG_XPS
 /******************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
 #ifdef CONFIG_XPS
similarity index 94%
rename from i40e-dkms/i40e-1.3.47/src/i40e/kcompat.h
rename to i40e-dkms/i40e-1.5.18/src/kcompat.h
index 1d43af658e62f2a593cf2b2aed76a0e449b93190..9c9f3cae2eccad975483277a8c57f43c892252db 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
 /*******************************************************************************
  *
- * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -56,6 +53,7 @@
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
 
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
 
+#include <net/ipv6.h>
 /* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
 #ifndef UTS_RELEASE
 /* utsrelease.h changed locations in 2.6.33 */
 /* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
 #ifndef UTS_RELEASE
 /* utsrelease.h changed locations in 2.6.33 */
@@ -2010,12 +2008,6 @@ static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
-
-/* other values will be created as #defines later */
-enum pci_bus_speed {
-       PCI_SPEED_UNKNOWN = 0xff,
-};
-
 enum pcie_link_width {
        PCIE_LNK_WIDTH_RESRV    = 0x00,
        PCIE_LNK_X1             = 0x01,
 enum pcie_link_width {
        PCIE_LNK_WIDTH_RESRV    = 0x00,
        PCIE_LNK_X1             = 0x01,
@@ -2296,6 +2288,20 @@ extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
 #define HAVE_ETHTOOL_GET_PERM_ADDR
 #endif /* 2.6.14 through 2.6.22 */
 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
 #define HAVE_ETHTOOL_GET_PERM_ADDR
 #endif /* 2.6.14 through 2.6.22 */
+
+static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+       int delta = 0;
+
+       if (headroom > (skb->data - skb->head))
+               delta = headroom - (skb->data - skb->head);
+
+       if (delta || skb_header_cloned(skb))
+               return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+                                       GFP_ATOMIC);
+       return 0;
+}
+#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h))
 #endif /* < 2.6.23 */
 
 /*****************************************************************************/
 #endif /* < 2.6.23 */
 
 /*****************************************************************************/
@@ -2439,6 +2445,10 @@ static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
                (1UL << (fls_long(n) - 1))
 #endif
 
                (1UL << (fls_long(n) - 1))
 #endif
 
+#ifndef BIT
+#define BIT(nr)         (1UL << (nr))
+#endif
+
 #else /* < 2.6.24 */
 #define HAVE_ETHTOOL_GET_SSET_COUNT
 #define HAVE_NETDEV_NAPI_LIST
 #else /* < 2.6.24 */
 #define HAVE_ETHTOOL_GET_SSET_COUNT
 #define HAVE_NETDEV_NAPI_LIST
@@ -2700,6 +2710,13 @@ static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
 
 #define  PCI_EXP_SLTSTA_PDS    0x0040  /* Presence Detect State */
 
 
 #define  PCI_EXP_SLTSTA_PDS    0x0040  /* Presence Detect State */
 
+#ifndef PCI_EXP_LNKSTA_CLS
+#define  PCI_EXP_LNKSTA_CLS    0x000f  /* Current Link Speed */
+#endif
+#ifndef PCI_EXP_LNKSTA_NLW
+#define  PCI_EXP_LNKSTA_NLW    0x03f0  /* Negotiated Link Width */
+#endif
+
 #ifndef pci_clear_master
 extern void _kc_pci_clear_master(struct pci_dev *dev);
 #define pci_clear_master(dev)  _kc_pci_clear_master(dev)
 #ifndef pci_clear_master
 extern void _kc_pci_clear_master(struct pci_dev *dev);
 #define pci_clear_master(dev)  _kc_pci_clear_master(dev)
@@ -3181,9 +3198,20 @@ static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev)
 #endif
 #endif /* 2.6.0 => 2.6.34 */
 
 #endif
 #endif /* 2.6.0 => 2.6.34 */
 
-#define PCIE_SPEED_2_5GT 0x14
-#define PCIE_SPEED_5_0GT 0x15
-#define PCIE_SPEED_8_0GT 0x16
+#ifndef pci_bus_speed
+/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */
+enum _kc_pci_bus_speed {
+       _KC_PCIE_SPEED_2_5GT            = 0x14,
+       _KC_PCIE_SPEED_5_0GT            = 0x15,
+       _KC_PCIE_SPEED_8_0GT            = 0x16,
+       _KC_PCI_SPEED_UNKNOWN           = 0xff,
+};
+#define pci_bus_speed          _kc_pci_bus_speed
+#define PCIE_SPEED_2_5GT       _KC_PCIE_SPEED_2_5GT
+#define PCIE_SPEED_5_0GT       _KC_PCIE_SPEED_5_0GT
+#define PCIE_SPEED_8_0GT       _KC_PCIE_SPEED_8_0GT
+#define PCI_SPEED_UNKNOWN      _KC_PCI_SPEED_UNKNOWN
+#endif /* pci_bus_speed */
 
 #else /* < 2.6.34 */
 #define HAVE_SYSTEM_SLEEP_PM_OPS
 
 #else /* < 2.6.34 */
 #define HAVE_SYSTEM_SLEEP_PM_OPS
@@ -3209,22 +3237,29 @@ ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
 #ifndef numa_mem_id
 #define numa_mem_id numa_node_id
 #endif
 #ifndef numa_mem_id
 #define numa_mem_id numa_node_id
 #endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
 #ifdef HAVE_TX_MQ
 #include <net/sch_generic.h>
 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
 #ifdef HAVE_TX_MQ
 #include <net/sch_generic.h>
 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
-void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
-#define netif_set_real_num_tx_queues  _kc_netif_set_real_num_tx_queues
-#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
 #else /* CONFIG_NETDEVICES_MULTI_QUEUE */
 #else /* CONFIG_NETDEVICES_MULTI_QUEUE */
-#define netif_set_real_num_tx_queues(_netdev, _count) \
-       do { \
-               (_netdev)->egress_subqueue_count = _count; \
-       } while (0)
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+                                                  unsigned int txq)
+{
+       dev->egress_subqueue_count = txq;
+       return 0;
+}
 #endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
 #else /* HAVE_TX_MQ */
 #endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
 #else /* HAVE_TX_MQ */
-#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev,
+                                                  unsigned int __always_unused txq)
+{
+       return 0;
+}
 #endif /* HAVE_TX_MQ */
 #endif /* HAVE_TX_MQ */
+#define netif_set_real_num_tx_queues(dev, txq) \
+       _kc_netif_set_real_num_tx_queues(dev, txq)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
 #ifndef ETH_FLAG_RXHASH
 #define ETH_FLAG_RXHASH (1<<28)
 #endif /* ETH_FLAG_RXHASH */
 #ifndef ETH_FLAG_RXHASH
 #define ETH_FLAG_RXHASH (1<<28)
 #endif /* ETH_FLAG_RXHASH */
@@ -3293,7 +3328,7 @@ do {                                                              \
 #define u64_stats_update_begin(a) do { } while(0)
 #define u64_stats_update_end(a) do { } while(0)
 #define u64_stats_fetch_begin(a) do { } while(0)
 #define u64_stats_update_begin(a) do { } while(0)
 #define u64_stats_update_end(a) do { } while(0)
 #define u64_stats_fetch_begin(a) do { } while(0)
-#define u64_stats_fetch_retry_bh(a) (0)
+#define u64_stats_fetch_retry_bh(a,b) (0)
 #define u64_stats_fetch_begin_bh(a) (0)
 
 #if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
 #define u64_stats_fetch_begin_bh(a) (0)
 
 #if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
@@ -3318,6 +3353,16 @@ static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb)
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#ifndef netif_set_real_num_tx_queues
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+                                                  unsigned int txq)
+{
+       netif_set_real_num_tx_queues(dev, txq);
+       return 0;
+}
+#define netif_set_real_num_tx_queues(dev, txq) \
+       _kc_netif_set_real_num_tx_queues(dev, txq)
+#endif
 #ifndef netif_set_real_num_rx_queues
 static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev,
                                                    unsigned int __always_unused rxq)
 #ifndef netif_set_real_num_rx_queues
 static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev,
                                                    unsigned int __always_unused rxq)
@@ -3364,8 +3409,9 @@ static inline void *_kc_vzalloc(unsigned long size)
 }
 #define vzalloc(_size) _kc_vzalloc(_size)
 
 }
 #define vzalloc(_size) _kc_vzalloc(_size)
 
-#ifndef vlan_get_protocol
-static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \
+     (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0)))
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
 {
        if (vlan_tx_tag_present(skb) ||
            skb->protocol != cpu_to_be16(ETH_P_8021Q))
 {
        if (vlan_tx_tag_present(skb) ||
            skb->protocol != cpu_to_be16(ETH_P_8021Q))
@@ -3376,8 +3422,8 @@ static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
 
        return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
 }
 
        return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
 }
-#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
-#endif
+#endif /* !RHEL5.7+ || RHEL6.0 */
+
 #ifdef HAVE_HW_TIME_STAMP
 #define SKBTX_HW_TSTAMP (1 << 0)
 #define SKBTX_IN_PROGRESS (1 << 2)
 #ifdef HAVE_HW_TIME_STAMP
 #define SKBTX_HW_TSTAMP (1 << 0)
 #define SKBTX_IN_PROGRESS (1 << 2)
@@ -3406,7 +3452,7 @@ static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
 }
 #define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
 #endif /* 2.6.22 -> 2.6.37 */
 }
 #define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
 #endif /* 2.6.22 -> 2.6.37 */
-#ifdef CONFIG_DCB
+#if IS_ENABLED(CONFIG_DCB)
 #ifndef IEEE_8021QAZ_MAX_TCS
 #define IEEE_8021QAZ_MAX_TCS 8
 #endif
 #ifndef IEEE_8021QAZ_MAX_TCS
 #define IEEE_8021QAZ_MAX_TCS 8
 #endif
@@ -3450,13 +3496,15 @@ static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
 #define FCOE_MTU       2158
 #endif
 #endif
 #define FCOE_MTU       2158
 #endif
 #endif
-#ifdef CONFIG_DCB
+#if IS_ENABLED(CONFIG_DCB)
 #ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
 #define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
 #endif
 #endif
 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
 #define kstrtoul(a, b, c)  ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
 #ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
 #define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
 #endif
 #endif
 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
 #define kstrtoul(a, b, c)  ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
+#define kstrtouint(a, b, c)  ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
+#define kstrtou32(a, b, c)  ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
 #endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */
 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
 extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16);
 #endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */
 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
 extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16);
@@ -3474,7 +3522,7 @@ extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up);
 #ifndef HAVE_MQPRIO
 #define HAVE_MQPRIO
 #endif /* HAVE_MQPRIO */
 #ifndef HAVE_MQPRIO
 #define HAVE_MQPRIO
 #endif /* HAVE_MQPRIO */
-#ifdef CONFIG_DCB
+#if IS_ENABLED(CONFIG_DCB)
 #ifndef HAVE_DCBNL_IEEE
 #define HAVE_DCBNL_IEEE
 #ifndef IEEE_8021QAZ_TSA_STRICT
 #ifndef HAVE_DCBNL_IEEE
 #define HAVE_DCBNL_IEEE
 #ifndef IEEE_8021QAZ_TSA_STRICT
@@ -3755,6 +3803,25 @@ typedef u32 netdev_features_t;
 #define HAVE_ETHTOOL_GRXFHINDIR_SIZE
 #endif /* SLE_VERSION(11,3,0) */
 #define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q)
 #define HAVE_ETHTOOL_GRXFHINDIR_SIZE
 #endif /* SLE_VERSION(11,3,0) */
 #define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q)
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
+                                       u8 *nexthdrp,
+                                       __be16 __always_unused *frag_offp)
+{
+       return ipv6_skip_exthdr(skb, start, nexthdrp);
+}
+#undef ipv6_skip_exthdr
+#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d))
+#endif /* !SLES11sp4 or greater */
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0)))
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+       return index % n_rx_rings;
+}
+#endif
+
 #else /* ! < 3.3.0 */
 #define HAVE_ETHTOOL_GRXFHINDIR_SIZE
 #define HAVE_INT_NDO_VLAN_RX_ADD_VID
 #else /* ! < 3.3.0 */
 #define HAVE_ETHTOOL_GRXFHINDIR_SIZE
 #define HAVE_INT_NDO_VLAN_RX_ADD_VID
@@ -3792,6 +3859,14 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *,
 #else /* NET_ADDR_RANDOM */
 #define eth_hw_addr_random(N) eth_random_addr(N->dev_addr)
 #endif /* NET_ADDR_RANDOM */
 #else /* NET_ADDR_RANDOM */
 #define eth_hw_addr_random(N) eth_random_addr(N->dev_addr)
 #endif /* NET_ADDR_RANDOM */
+
+#ifndef for_each_set_bit_from
+#define for_each_set_bit_from(bit, addr, size) \
+       for ((bit) = find_next_bit((addr), (size), (bit)); \
+                       (bit) < (size); \
+                       (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit_from */
+
 #else /* < 3.4.0 */
 #include <linux/kconfig.h>
 #endif /* >= 3.4.0 */
 #else /* < 3.4.0 */
 #include <linux/kconfig.h>
 #endif /* >= 3.4.0 */
@@ -4027,7 +4102,9 @@ int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
 #define USE_CONST_DEV_UC_CHAR
 #endif
 
 #define USE_CONST_DEV_UC_CHAR
 #endif
 
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8))
 #define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi)
 #define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi)
+#endif /* !RHEL6.8+ */
 
 #else /* >= 3.7.0 */
 #define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
 
 #else /* >= 3.7.0 */
 #define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
@@ -4065,6 +4142,14 @@ static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
 }
 #define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
 #endif /* is_link_local_ether_addr */
 }
 #define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
 #endif /* is_link_local_ether_addr */
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+                      int target, unsigned short *fragoff, int *flags);
+#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e))
+
+#ifndef FLOW_MAC_EXT
+#define FLOW_MAC_EXT   0x40000000
+#endif /* FLOW_MAC_EXT */
+
 #else /* >= 3.8.0 */
 #ifndef __devinit
 #define __devinit
 #else /* >= 3.8.0 */
 #ifndef __devinit
 #define __devinit
@@ -4090,7 +4175,9 @@ static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
 #define HAVE_ENCAP_CSUM_OFFLOAD
 #endif
 
 #define HAVE_ENCAP_CSUM_OFFLOAD
 #endif
 
+#ifndef HAVE_GRE_ENCAP_OFFLOAD
 #define HAVE_GRE_ENCAP_OFFLOAD
 #define HAVE_GRE_ENCAP_OFFLOAD
+#endif
 
 #ifndef HAVE_SRIOV_CONFIGURE
 #define HAVE_SRIOV_CONFIGURE
 
 #ifndef HAVE_SRIOV_CONFIGURE
 #define HAVE_SRIOV_CONFIGURE
@@ -4108,6 +4195,51 @@ static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
 
+#undef BUILD_BUG_ON
+#ifdef __CHECKER__
+#define BUILD_BUG_ON(condition) (0)
+#else /* __CHECKER__ */
+#ifndef __compiletime_warning
+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400)
+#define __compiletime_warning(message) __attribute__((warning(message)))
+#else /* __GNUC__ */
+#define __compiletime_warning(message)
+#endif /* __GNUC__ */
+#endif /* __compiletime_warning */
+#ifndef __compiletime_error
+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400)
+#define __compiletime_error(message) __attribute__((error(message)))
+#define __compiletime_error_fallback(condition) do { } while (0)
+#else /* __GNUC__ */
+#define __compiletime_error(message)
+#define __compiletime_error_fallback(condition) \
+       do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
+#endif /* __GNUC__ */
+#else /* __compiletime_error */
+#define __compiletime_error_fallback(condition) do { } while (0)
+#endif /* __compiletime_error */
+#define __compiletime_assert(condition, msg, prefix, suffix)           \
+       do {                                                            \
+               bool __cond = !(condition);                             \
+               extern void prefix ## suffix(void) __compiletime_error(msg); \
+               if (__cond)                                             \
+                       prefix ## suffix();                             \
+               __compiletime_error_fallback(__cond);                   \
+       } while (0)
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+       __compiletime_assert(condition, msg, prefix, suffix)
+#define compiletime_assert(condition, msg) \
+       _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else /* __OPTIMIZE__ */
+#define BUILD_BUG_ON(condition) \
+       BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
+#endif /* __OPTIMIZE__ */
+#endif /* __CHECKER__ */
+
 #undef hlist_entry
 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
 
 #undef hlist_entry
 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
 
@@ -4212,9 +4344,15 @@ extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) )
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) )
+#define netdev_notifier_info_to_dev(ptr) ptr
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#define HAVE_NDO_SET_VF_LINK_STATE
+#endif
 #else /* >= 3.11.0 */
 #define HAVE_NDO_SET_VF_LINK_STATE
 #define HAVE_SKB_INNER_PROTOCOL
 #else /* >= 3.11.0 */
 #define HAVE_NDO_SET_VF_LINK_STATE
 #define HAVE_SKB_INNER_PROTOCOL
+#define HAVE_MPLS_FEATURES
 #endif /* >= 3.11.0 */
 
 /*****************************************************************************/
 #endif /* >= 3.11.0 */
 
 /*****************************************************************************/
@@ -4293,11 +4431,7 @@ static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
 }
 #endif /* !skb_set_hash */
 
 }
 #endif /* !skb_set_hash */
 
-#else
-
-#ifndef HAVE_ENCAP_TSO_OFFLOAD
-#define HAVE_ENCAP_TSO_OFFLOAD
-#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+#else  /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */
 
 #ifndef HAVE_VXLAN_RX_OFFLOAD
 #define HAVE_VXLAN_RX_OFFLOAD
 
 #ifndef HAVE_VXLAN_RX_OFFLOAD
 #define HAVE_VXLAN_RX_OFFLOAD
@@ -4306,7 +4440,7 @@ static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
 #ifndef HAVE_VXLAN_CHECKS
 #define HAVE_VXLAN_CHECKS
 #endif /* HAVE_VXLAN_CHECKS */
 #ifndef HAVE_VXLAN_CHECKS
 #define HAVE_VXLAN_CHECKS
 #endif /* HAVE_VXLAN_CHECKS */
-#endif /* !(RHEL_RELEASE_CODE&&RHEL_RELEASE_CODE>=RHEL_RELEASE_VERSION(7,0)) */
+#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */
 
 #ifndef pci_enable_msix_range
 extern int __kc_pci_enable_msix_range(struct pci_dev *dev,
 
 #ifndef pci_enable_msix_range
 extern int __kc_pci_enable_msix_range(struct pci_dev *dev,
@@ -4446,11 +4580,23 @@ static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev,
 #define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
 #endif
 
 #define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
 #endif
 
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
+/* if someone backports this, hopefully they backport as a #define.
+ * declare it as zero on older kernels so that if it get's or'd in
+ * it won't effect anything, therefore preventing core driver changes
+ */
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0
+#define SKB_GSO_UDP_TUNNEL_CSUM 0
+#endif
+
 #else
 #define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
 #endif /* 3.16.0 */
 
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
 #else
 #define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
 #endif /* 3.16.0 */
 
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+      RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
+    !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
 #ifndef timespec64
 #define timespec64 timespec
 static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
 #ifndef timespec64
 #define timespec64 timespec
 static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
@@ -4474,6 +4620,8 @@ static inline struct timespec timespec64_to_timespec(const struct timespec64 ts6
 #define ktime_to_timespec64 ktime_to_timespec
 #define timespec64_add_ns timespec_add_ns
 #endif /* timespec64 */
 #define ktime_to_timespec64 ktime_to_timespec
 #define timespec64_add_ns timespec_add_ns
 #endif /* timespec64 */
+#endif /* !(RHEL6.8<RHEL7.0) && !RHEL7.2+ */
+
 #define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
 #else
 #define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
 #define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
 #else
 #define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
@@ -4493,6 +4641,18 @@ extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_l
 #ifndef ETH_P_XDSA
 #define ETH_P_XDSA 0x00F8
 #endif
 #ifndef ETH_P_XDSA
 #define ETH_P_XDSA 0x00F8
 #endif
+/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1))
+#define HAVE_SKBUFF_CSUM_LEVEL
+#endif /* >= RH 7.1 */
+
+#undef GENMASK
+#define GENMASK(h, l) \
+       (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+       (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
 #else /*  3.18.0 */
 #define HAVE_SKBUFF_CSUM_LEVEL
 #define HAVE_SKB_XMIT_MORE
 #else /*  3.18.0 */
 #define HAVE_SKBUFF_CSUM_LEVEL
 #define HAVE_SKB_XMIT_MORE
@@ -4508,6 +4668,12 @@ extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_l
 /* netdev_phys_port_id renamed to netdev_phys_item_id */
 #define netdev_phys_item_id netdev_phys_port_id
 
 /* netdev_phys_port_id renamed to netdev_phys_item_id */
 #define netdev_phys_item_id netdev_phys_port_id
 
+static inline void _kc_napi_complete_done(struct napi_struct *napi,
+                                         int __always_unused work_done) {
+       napi_complete(napi);
+}
+#define napi_complete_done _kc_napi_complete_done
+
 #ifndef NETDEV_RSS_KEY_LEN
 #define NETDEV_RSS_KEY_LEN (13 * 4)
 #endif
 #ifndef NETDEV_RSS_KEY_LEN
 #define NETDEV_RSS_KEY_LEN (13 * 4)
 #endif
@@ -4560,21 +4726,27 @@ static inline int __kc_eth_skb_pad(struct sk_buff *skb)
 #define eth_skb_pad(skb) __kc_eth_skb_pad(skb)
 #endif /* eth_skb_pad && skb_put_padto */
 
 #define eth_skb_pad(skb) __kc_eth_skb_pad(skb)
 #endif /* eth_skb_pad && skb_put_padto */
 
-#ifndef napi_alloc_skb
+#ifndef SKB_ALLOC_NAPI
+/* RHEL 7.2 backported napi_alloc_skb and friends */
 static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length)
 {
        return netdev_alloc_skb_ip_align(napi->dev, length);
 }
 #define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len)
 static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length)
 {
        return netdev_alloc_skb_ip_align(napi->dev, length);
 }
 #define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len)
-#endif /* napi_alloc_skb */
+#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len)
+#endif /* SKB_ALLOC_NAPI */
 #define HAVE_CONFIG_PM_RUNTIME
 #if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
 #define HAVE_CONFIG_PM_RUNTIME
 #if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
-#define NDO_BRIDGE_GETLINK_HAS_FILTER_MASK_PARAM
+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
 #define HAVE_RXFH_HASHFUNC
 #endif /* RHEL_RELEASE_CODE */
 #define HAVE_RXFH_HASHFUNC
 #endif /* RHEL_RELEASE_CODE */
+#ifndef napi_schedule_irqoff
+#define napi_schedule_irqoff   napi_schedule
+#endif
 #else /* 3.19.0 */
 #define HAVE_NDO_FDB_ADD_VID
 #define HAVE_RXFH_HASHFUNC
 #else /* 3.19.0 */
 #define HAVE_NDO_FDB_ADD_VID
 #define HAVE_RXFH_HASHFUNC
+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
 #endif /* 3.19.0 */
 
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) )
 #endif /* 3.19.0 */
 
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) )
@@ -4609,6 +4781,8 @@ static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta)
 #else
 #define HAVE_PTP_CLOCK_INFO_GETTIME64
 #define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
 #else
 #define HAVE_PTP_CLOCK_INFO_GETTIME64
 #define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#define HAVE_PASSTHRU_FEATURES_CHECK
+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
 #endif /* 4,1,0 */
 
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9))
 #endif /* 4,1,0 */
 
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9))
@@ -4631,4 +4805,36 @@ static inline bool page_is_pfmemalloc(struct page __maybe_unused *page)
 #define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
 #endif /* 4.2.0 */
 
 #define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
 #endif /* 4.2.0 */
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0))
+#else
+#define HAVE_NDO_SET_VF_TRUST
+#endif /* 4.4.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0))
+/* protect against a likely backport */
+#ifndef NETIF_F_CSUM_MASK
+#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM
+#endif /* NETIF_F_CSUM_MASK */
+#ifndef NETIF_F_SCTP_CRC
+#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM
+#endif /* NETIF_F_SCTP_CRC */
+#else
+#define HAVE_GENEVE_RX_OFFLOAD
+#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD
+#endif /* 4.5.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0))
+static inline void napi_consume_skb(struct sk_buff *skb,
+                                   int __always_unused budget)
+{
+       dev_consume_skb_any(skb);
+}
+
+static inline void page_ref_inc(struct page *page)
+{
+       atomic_inc(&page->_count);
+}
+
+#endif /* 4.6.0 */
+
 #endif /* _KCOMPAT_H_ */
 #endif /* _KCOMPAT_H_ */