review.fuel-infra Code Review - packages/trusty/i40e-dkms.git/commitdiff
Cherry-pick from 8b5eba2 (master). Update i40e-dkms kernel module up to the ver 1... 91/16591/4
authorAlbert <asyriy@mirantis.com>
Mon, 1 Feb 2016 10:48:55 +0000 (12:48 +0200)
committerAlbert <asyriy@mirantis.com>
Mon, 1 Feb 2016 15:36:25 +0000 (17:36 +0200)
The commit is a cherry-pick from the master branch, commit 8b5eba2.
The debian/changelog file was updated to have the correct branch
value 8.0 (for the current branch) in the header.

Change-Id: I5541c6cbddab4ae7fce845c0009327904cf05db9

58 files changed:
debian/changelog
debian/copyright
debian/prerm
debian/rules
i40e-dkms-1.2.48/i40e-1.2.48/SUMS [deleted file]
i40e-dkms-1.2.48/i40e-1.2.48/dkms.conf [deleted file]
i40e-dkms-1.2.48/i40e-1.2.48/i40e.7 [deleted file]
i40e-dkms-1.3.47/Makefile [moved from i40e-dkms-1.2.48/Makefile with 100% similarity]
i40e-dkms-1.3.47/common.postinst [moved from i40e-dkms-1.2.48/common.postinst with 100% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/COPYING [moved from i40e-dkms-1.2.48/i40e-1.2.48/COPYING with 100% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/README [moved from i40e-dkms-1.2.48/i40e-1.2.48/README with 50% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/SUMS [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/dkms.conf [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/i40e.7 [new file with mode: 0755]
i40e-dkms-1.3.47/i40e-1.3.47/i40e.spec [moved from i40e-dkms-1.2.48/i40e-1.2.48/i40e.spec with 99% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/pci.updates [moved from i40e-dkms-1.2.48/i40e-1.2.48/pci.updates with 60% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/scripts/dump_tables [new file with mode: 0755]
i40e-dkms-1.3.47/i40e-1.3.47/scripts/set_irq_affinity [moved from i40e-dkms-1.2.48/i40e-1.2.48/scripts/set_irq_affinity with 84% similarity, mode: 0755]
i40e-dkms-1.3.47/i40e-1.3.47/src/Makefile [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/Makefile with 79% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/Kbuild [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/Kbuild with 100% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/Module.supported [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/Module.supported with 100% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e.h with 80% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_adminq.c with 95% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_adminq.h with 94% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq_cmd.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_adminq_cmd.h with 96% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_alloc.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_alloc.h with 100% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_common.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_common.c with 91% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_configfs.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_configfs.c with 93% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_dcb.c with 53% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_dcb.h with 81% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb_nl.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_dcb_nl.c with 94% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_debugfs.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_debugfs.c with 89% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_devids.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_diag.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_diag.c with 96% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_diag.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_diag.h with 100% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_ethtool.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_ethtool.c with 75% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_fcoe.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_fcoe.c with 97% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_fcoe.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_fcoe.h with 97% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_helper.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_hmc.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_hmc.c with 91% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_hmc.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_hmc.h with 96% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_lan_hmc.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_lan_hmc.c with 98% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_lan_hmc.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_lan_hmc.h with 100% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_main.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_main.c with 85% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_nvm.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_nvm.c with 76% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_osdep.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_osdep.h with 99% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_prototype.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_prototype.h with 95% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_ptp.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_ptp.c with 90% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_register.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_register.h with 99% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_status.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_status.h with 100% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_txrx.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_txrx.c with 87% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_txrx.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_txrx.h with 79% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_type.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_type.h with 91% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_virtchnl.h with 94% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_virtchnl_pf.c with 88% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_virtchnl_pf.h with 92% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/kcompat.c [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/kcompat.c with 97% similarity]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/kcompat.h [moved from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/kcompat.h with 95% similarity]

index 708751597db5916b5e1c82ca31273725a3afcafa..8aee65f3ea2774a945574c14075380fe450c2533 100755 (executable)
@@ -1,10 +1,17 @@
+i40e-dkms (1.3.47-1~u14.04+mos1) MOS 8.0; urgency=low
+
+  * Update the driver i40e version up to 1.3.47
+
+ -- Mirantis Openstack Linux Team <mos-linux@mirantis.com>  Fri, 29 Jan 2016 08:17:42 +0000
+ -- The sources were taken from http://sourceforge.net/projects/e1000/files/i40e%20stable/1.3.47/
+
 i40e-dkms (1.2.48-1~u14.04+mos2) MOS 7.0 Divergent package; urgency=low
 
   * Fix dkms.conf to be able build the package in Ubuntu chroot from CentOS
 
  -- Mirantis Openstack Linux Team <mos-linux@mirantis.com>  Tue, 18 August 2015 18:55:19 +0000
 
-  * Update the driver i40e version to 1.2.48 
+  * Update the driver i40e version to 1.2.48 .
 
  -- Mirantis Openstack Linux Team <mos-linux@mirantis.com>  Thu, 30 July 2015 16:45:27 +0000
  -- The sources were taken from http://sourceforge.net/projects/e1000/files/i40e%20stable/1.2.48/
index 92fcd2da17f48f351de931f0432a51cd1b1ca46d..ea1197fd419e8bc05a1204856e3f8b17b44f4362 100755 (executable)
@@ -1,7 +1,7 @@
 
 Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
 Upstream-Name: i40e-dkms
-Source: http://sourceforge.net/projects/e1000/files/i40e%20stable/1.2.48/
+Source: http://sourceforge.net/projects/e1000/files/i40e%20stable/1.3.47/
 
 Files: *
 Copyright: Copyright(c) 2013 - 2015 Intel Corporation.
index e398edbfcbec4f71c5d7f8cacf66a24d755caf67..7254308d84c95bd055af662f5a960543450f0aa6 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 NAME=i40e
-VERSION=1.2.48
+VERSION=1.3.47
 
 set -e
 
index 293ecf4cf7a86d1875dce259cb34af9b7426262b..d2a9f0fd78e9b6300c4d7e7d3406a844a305bc56 100755 (executable)
@@ -6,7 +6,7 @@
 
 DEB_NAME=i40e
 NAME=i40e
-VERSION=1.2.48
+VERSION=1.3.47
 
 configure: configure-stamp
 configure-stamp:
diff --git a/i40e-dkms-1.2.48/i40e-1.2.48/SUMS b/i40e-dkms-1.2.48/i40e-1.2.48/SUMS
deleted file mode 100644 (file)
index 9de4c78..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-29761     3 i40e-1.2.48/pci.updates
-42996     7 i40e-1.2.48/scripts/set_irq_affinity
-17910     3 i40e-1.2.48/i40e.7
-18098     6 i40e-1.2.48/src/i40e/i40e_diag.c
-51209    36 i40e-1.2.48/src/i40e/i40e_nvm.c
-50800     5 i40e-1.2.48/src/i40e/i40e_virtchnl_pf.h
-29128     6 i40e-1.2.48/src/i40e/i40e_lan_hmc.h
-25642    81 i40e-1.2.48/src/i40e/i40e_debugfs.c
-50254     9 i40e-1.2.48/src/i40e/i40e_dcb_nl.c
-62964     5 i40e-1.2.48/src/i40e/i40e_fcoe.h
-21888    11 i40e-1.2.48/src/i40e/i40e_hmc.c
-16950    66 i40e-1.2.48/src/i40e/i40e_adminq_cmd.h
-41524    21 i40e-1.2.48/src/i40e/i40e_dcb.c
-39150    29 i40e-1.2.48/src/i40e/i40e_adminq.c
-33767     2 i40e-1.2.48/src/i40e/Kbuild
-48258    64 i40e-1.2.48/src/i40e/i40e_virtchnl_pf.c
-51297     9 i40e-1.2.48/src/i40e/i40e_hmc.h
-50384    22 i40e-1.2.48/src/i40e/i40e_ptp.c
-60649     2 i40e-1.2.48/src/i40e/i40e_diag.h
-45048    46 i40e-1.2.48/src/i40e/i40e_fcoe.c
-45439    49 i40e-1.2.48/src/i40e/i40e_type.h
-45111    51 i40e-1.2.48/src/i40e/kcompat.c
-44588     1 i40e-1.2.48/src/i40e/Module.supported
-32162    20 i40e-1.2.48/src/i40e/i40e_prototype.h
-32982   151 i40e-1.2.48/src/i40e/i40e_common.c
-07400     3 i40e-1.2.48/src/i40e/i40e_alloc.h
-28762   220 i40e-1.2.48/src/i40e/i40e_register.h
-14152    12 i40e-1.2.48/src/i40e/i40e_txrx.h
-24191   132 i40e-1.2.48/src/i40e/kcompat.h
-03090    88 i40e-1.2.48/src/i40e/i40e_ethtool.c
-27945    83 i40e-1.2.48/src/i40e/i40e_txrx.c
-58435     5 i40e-1.2.48/src/i40e/i40e_adminq.h
-38307     6 i40e-1.2.48/src/i40e/i40e_dcb.h
-39855    42 i40e-1.2.48/src/i40e/i40e_lan_hmc.c
-22745     4 i40e-1.2.48/src/i40e/i40e_status.h
-46379    28 i40e-1.2.48/src/i40e/i40e.h
-47823   298 i40e-1.2.48/src/i40e/i40e_main.c
-35379    12 i40e-1.2.48/src/i40e/i40e_virtchnl.h
-42758    11 i40e-1.2.48/src/i40e/i40e_configfs.c
-34914     4 i40e-1.2.48/src/i40e/i40e_osdep.h
-36221     9 i40e-1.2.48/src/Makefile
-56582    10 i40e-1.2.48/i40e.spec
-02733    18 i40e-1.2.48/COPYING
-34717    31 i40e-1.2.48/README
diff --git a/i40e-dkms-1.2.48/i40e-1.2.48/dkms.conf b/i40e-dkms-1.2.48/i40e-1.2.48/dkms.conf
deleted file mode 100644 (file)
index 2a515d6..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-MAKE="make -C src/ KERNELDIR=/lib/modules/${kernelver}/build  BUILD_KERNEL=${kernelver}"
-CLEAN="make -C src/ clean"
-BUILT_MODULE_NAME=i40e
-BUILT_MODULE_LOCATION=src/
-DEST_MODULE_LOCATION="/updates"
-PACKAGE_NAME=i40e-dkms
-PACKAGE_VERSION=1.2.48
-REMAKE_INITRD=no
diff --git a/i40e-dkms-1.2.48/i40e-1.2.48/i40e.7 b/i40e-dkms-1.2.48/i40e-1.2.48/i40e.7
deleted file mode 100755 (executable)
index 46d9dab..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-.\" LICENSE
-.\"
-.\" This software program is released under the terms of a license agreement between you ('Licensee') and Intel.  Do not use or load this software or any associated materials (collectively, the 'Software') until you have carefully read the full terms and conditions of the LICENSE located in this software package.  By loading or using the Software, you agree to the terms of this Agreement.  If you do not agree with the terms of this Agreement, do not install or use the Software.
-.\"
-.\" * Other names and brands may be claimed as the property of others.
-.\"
-.TH i40e 1 "March 31, 2015"
-
-.SH NAME
-i40e \- This file describes the Linux* Base Driver for the Intel Ethernet
-Controller XL710 Family of Controllers.
-.SH SYNOPSIS
-.PD 0.4v
-modprobe i40e [<option>=<VAL1>,<VAL2>,...]
-.br
-.PD 1v
-.LP
-.SH DESCRIPTION
-This driver supports the \fB2.6.32\fR and newer kernels, and includes support
-for any 64 bit Linux supported system, including Itanium(R)2, x86_64, PPC64,
-ARM, etc.
-.LP
-This driver is only supported as a loadable module at this time.  Intel is not
-supplying patches against the kernel source to allow for static linking of the
-driver.  For questions related to hardware requirements, refer to the
-documentation supplied with your adapter.  All hardware requirements listed
-apply to use with Linux.
-.SH JUMBO FRAMES
-The driver supports Jumbo Frames.  Jumbo Frames support is enabled by changing
-the MTU to a value larger than the default of 1500.  Use the ifconfig command
-to increase the MTU size.  For example, enter the following where ethX is the
-interface name:
-.IP
-ifconfig ethX mtu 9000 up
-.LP
-The maximum MTU setting for Jumbo Frames is 9706.  This value coincides with
-the maximum Jumbo Frames size of 9728.  This driver will attempt to use
-multiple page sized buffers to receive each jumbo packet.  This should help to
-avoid buffer starvation issues when allocating receive packets.
-.SH SUPPORT
-For additional information, including supported adapters, building, and
-installation, see the README file included with the driver.
-.LP
-For general information and support, go to the Intel support website at:
-.IP
-.B http://support.intel.com
-.LP
-If an issue is identified with the released source code on a supported kernel
-with a supported adapter, email the specific information related to the issue
-to e1000-devel@lists.sourceforge.net.
-.LP
similarity index 50%
rename from i40e-dkms-1.2.48/i40e-1.2.48/README
rename to i40e-dkms-1.3.47/i40e-1.3.47/README
index 8ae616abe42081b48bba1ff5310704bd9356a7ca..9e82a6110467e638b8fdbbd0b2d2b8fa51f5c844 100755 (executable)
@@ -1,14 +1,13 @@
+
 i40e Linux* Base Driver for the Intel(R) XL710 Ethernet Controller Family
 ===============================================================================
 
-
 ===============================================================================
 
-June 12, 2015
+September 25, 2015
 
 ===============================================================================
 
-
 Contents
 --------
 
@@ -21,48 +20,79 @@ Contents
 - Known Issues
 
 
-===============================================================================
+================================================================================
+
+
+Important Notes
+---------------
+
+Configuring SR-IOV for improved network security
+------------------------------------------------
+
+In a virtualized environment, on Intel(R) Server Adapters that support SR-IOV,
+the virtual function (VF) may be subject to malicious behavior. Software-
+generated layer two frames, like IEEE 802.3x (link flow control), IEEE 802.1Qbb
+(priority based flow-control), and others of this type, are not expected and
+can throttle traffic between the host and the virtual switch, reducing
+performance. To resolve this issue, configure all SR-IOV enabled ports for
+VLAN tagging. This configuration allows unexpected, and potentially malicious,
+frames to be dropped.
+
 
 
 Overview
 --------
 
-This document describes the i40e Linux* Base Driver for the XL710 Ethernet
-Controller Family of Adapters. The Linux base driver supports the 2.6.32 and
-newer kernels and includes support for Linux supported x86_64 systems.
+This document describes the i40e Linux* Base Driver for the XL710 Ethernet Controller Family of Adapters.
+
+The Linux* base driver supports the following kernel versions:
+2.6.32 and newer
 
-The following features are available in supported kernels:
+It includes support for Linux supported x86_64 systems.
 
+This driver is only supported as a loadable module at this time. Intel is
+not supplying patches against the kernel source to allow for static linking of
+the drivers.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel adapter. All hardware requirements listed apply to
+use with Linux.
+
+The following features are now available in supported kernels:
 - Native VLANs
 - Channel Bonding (teaming)
 - SNMP
 - Generic Receive Offload
 
 Adapter teaming is implemented using the native Linux Channel bonding
-module. This is included in supported Linux kernels. Channel Bonding
-documentation can be found in the Linux kernel source:
-/Documentation/networking/bonding.txt
+module. This is included in supported Linux kernels.
+Channel Bonding documentation can be found in the Linux kernel source:
+/Documentation/networking/bonding.txt
+
+The driver information previously displayed in the /proc file system is not
+supported in this release.
+
+Driver information can be obtained using ethtool, lspci, and ifconfig.
+Instructions on updating ethtool can be found in the section Additional 
+Configurations later in this document.
 
-Driver information can be obtained using the ethtool, lspci, or iproute2
-ip command. Instructions on updating ethtool can be found in the
-Additional Features & Configurations section later on this page.
 
 
 Identifying Your Adapter
 ------------------------
-
 The driver in this release is compatible with XL710 and X710-based Intel
 Ethernet Network Connections.
 
-For more information on how to identify your adapter, go to the Adapter &
+For information on how to identify your adapter, go to the Adapter &
 Driver ID Guide at:
 http://support.intel.com/support/go/network/adapter/proidguide.htm
 
-For the best performance, make sure the latest NVM/FW is installed on
-your device and that you are using the newest drivers. For the latest 
-NVM/FW images and Intel network drivers for Linux, refer to the following
-website. Select the link for your adapter.
-http://support.intel.com/support/go/network/adapter/home.htm
+For the best performance, make sure the latest NVM/FW is installed on your device
+ and that you are using the newest drivers.
+
+For the latest NVM/FW images and Intel network drivers, refer to the
+following website and select your adapter.
+http://www.intel.com/support
 
 
 SFP+ Devices with Pluggable Optics
@@ -70,89 +100,83 @@ SFP+ Devices with Pluggable Optics
 
 SR Modules
 ----------
-  Intel     DUAL RATE 1G/10G SFP+ SR (bailed)    E10GSFPSR
+  Intel        DUAL RATE 1G/10G SFP+ SR (bailed)       E10GSFPSR
 
 LR Modules
-----------
-  Intel     DUAL RATE 1G/10G SFP+ LR (bailed)    E10GSFPLR
+---------- 
+  Intel        DUAL RATE 1G/10G SFP+ LR (bailed)       E10GSFPLR
 
 1G SFP Modules
-----------
-The following is a list of 3rd party 1G SFP modules that have received some
+--------------
+The following is a list of 3rd party SFP modules that have received some
 testing. Not all modules are applicable to all devices.
 
-Supplier    Type                Part Numbers
-  Finisar   1000BASE-T SFP      FCLF-8251-3
-  Kinnex A  1000BASE-T SFP      XSFP-T-RJ12-0101-DLL
-  Avago     1000BASE-T SFP      ABCU-5710RZ
+Supplier       Type            Part Numbers
+Finisar                1000BASE-T      SFP FCLF-8251-3
+Kinnex A       1000BASE-T      SFP XSFP-T-RJ12-0101-DLL
+Avago          1000BASE-T      SFP ABCU-5710RZ
 
 QSFP+ Modules
 -------------
-  Intel     TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed)    E40GQSFPSR
-  QSFP+ 1G speed is not supported on XL710 based devices.
+  Intel        TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed)        E40GQSFPSR
+    QSFP+ 1G speed is not supported on XL710 based devices.
 
 X710/XL710 Based SFP+ adapters support passive QSFP+ Direct Attach cables.
 Intel recommends using Intel optics and cables. Other modules may function
 but are not validated by Intel. Contact Intel for supported media types.
 
 
-===============================================================================
+================================================================================
 
 
 Building and Installation
 -------------------------
 
-To build a binary RPM* package of this driver, run 'rpmbuild -tb i40e*.tar.gz'.
+To build a binary RPM* package of this driver, run 'rpmbuild -tb
+i40e-<x.x.x>.tar.gz', where <x.x.x> is the version number for the driver tar file.
 
 NOTES:
-  - For the build to work properly, the currently running kernel MUST match
-    the version and configuration of the installed kernel sources. If you have
-    just recompiled the kernel, reboot the system before building.
-  - RPM functionality has only been tested in Red Hat distributions.
+
+- For the build to work properly, the currently running kernel MUST match
+  the version and configuration of the installed kernel sources. If you have
+  just recompiled the kernel reboot the system before building.
+- RPM functionality has only been tested in Red Hat distributions.
 
 1. Move the base driver tar file to the directory of your choice. For
    example, use '/home/username/i40e' or '/usr/local/src/i40e'.
 
 2. Untar/unzip the archive, where <x.x.x> is the version number for the
    driver tar file:
-
-     tar zxf i40e-<x.x.x>.tar.gz
+   tar zxf i40e-<x.x.x>.tar.gz
 
 3. Change to the driver src directory, where <x.x.x> is the version number
    for the driver tar:
-
-     cd i40e-<x.x.x>/src/
+   cd i40e-<x.x.x>/src/
 
 4. Compile the driver module:
-
-     # make install
-
+   # make install
    The binary will be installed as:
-
-     /lib/modules/`uname -r`/kernel/drivers/net/i40e/i40e.ko
+   /lib/modules/<KERNEL VERSION>/kernel/drivers/net/i40e/i40e.[k]o
 
    The install location listed above is the default location. This may differ
    for various Linux distributions.
 
 5. Load the module using the modprobe command:
-
-     modprobe i40e <parameter>=<value>
+   modprobe <i40e> [parameter=port1_value,port2_value]
 
    Make sure that any older i40e drivers are removed from the kernel before
    loading the new module:
+   rmmod i40e; modprobe i40e
 
-     rmmod i40e; modprobe i40e
-
-6. Assign an IP address to the interface by entering the following, where ethX
-   is the interface name that was shown in dmesg after modprobe:
-
-     ip address add <IP_address>/<netmask bits> dev ethX
+6. Assign an IP address to the interface by entering the following,
+   where ethX is the interface name that was shown in dmesg after modprobe:
+   
+   ip address add <IP_address>/<netmask bits> dev ethX
 
 7. Verify that the interface works. Enter the following, where IP_address
    is the IP address for another machine on the same subnet as the interface
    that is being tested:
-
-     ping <IP_address>
+   ping <IP_address>
 
 NOTE:
    For certain distributions like (but not limited to) RedHat Enterprise
@@ -165,12 +189,11 @@ NOTE:
        # update-initramfs -u
 
 
-==============================================================================
+================================================================================
 
 
 Command Line Parameters
 -----------------------
-
 In general, ethtool and other OS specific commands are used to configure user
 changeable parameters after the driver is loaded. The i40e driver only supports
 the max_vfs kernel parameter on older kernels that do not have the standard
@@ -180,44 +203,68 @@ parameter that can control the default logging verbosity of the driver.
 If the driver is built as a module, the following optional parameters are used
 by entering them on the command line with the modprobe command using this
 syntax:
+modprobe i40e [<option>=<VAL1>]
 
-      modprobe i40e [<option>=<VAL1>]
-
-For example:  modprobe i40e max_vfs=7
+There needs to be a <VAL#> for each network port in the system supported by
+this driver. The values will be applied to each instance, in function order.
+For example:
+modprobe i40e max_vfs=7
 
-The default value for each parameter is generally the recommended setting
+The default value for each parameter is generally the recommended setting,
 unless otherwise noted.
 
-max_vfs
--------
-Valid Range:     1-32 (X710 based devices)
-                 1-64 (XL710 based devices)
 
-Default Value:   0
 
+max_vfs
+-------
+Valid Range:
+1-32 (X710 based devices)
+1-64 (XL710 based devices)
 NOTE: This parameter is only used on kernel 3.7.x and below. On kernel 3.8.x
 and above, use sysfs to enable VFs. For example:
-#echo $num_vf_enabled > /sys/class/net/$dev/device/sriov_numvfs   //enable VFs
-#echo 0 > /sys/class/net/$dev/device/sriov_numvfs                 //disable VFs
-
-This parameter adds support for SR-IOV. It causes the driver to spawn up
-to max_vfs worth of virtual functions. Some hardware configurations
-support fewer SR-IOV instances, as the whole XL710 controller (all
-functions) is limited to 128 SR-IOV interfaces in total.
-
-NOTE: When SR-IOV mode is enabled, hardware VLAN filtering and VLAN tag
-stripping/insertion will remain enabled. The old VLAN filter should be
-removed before the new VLAN filter is added. For example:
-
-    ip link set eth0 vf 0 vlan 100     // set vlan 100 for VF 0
-    ip link set eth0 vf 0 vlan 0       // delete vlan 100
-    ip link set eth0 vf 0 vlan 200     // set a new vlan 200 for VF 0
-
-
-
-Intel(R) i40e Ethernet Flow Director
-------------------------------------
-
+#echo $num_vf_enabled > /sys/class/net/$dev/device/sriov_numvfs        //enable VFs
+#echo 0 > /sys/class/net/$dev/device/sriov_numvfs      //disable VFs
+This parameter adds support for SR-IOV. It causes the driver to spawn up to
+max_vfs worth of virtual functions.
+Some hardware configurations support fewer SR-IOV instances, as the whole
+XL710 controller (all functions) is limited to 128 SR-IOV interfaces in total.
+NOTE: When SR-IOV mode is enabled, hardware VLAN filtering
+and VLAN tag stripping/insertion will remain enabled. Please remove the old
+VLAN filter before the new VLAN filter is added. For example,
+ip link set eth0 vf 0 vlan 100 // set vlan 100 for VF 0
+ip link set eth0 vf 0 vlan 0   // Delete vlan 100
+ip link set eth0 vf 0 vlan 200 // set a new vlan 200 for VF 0
+
+
+Configuring SR-IOV for improved network security
+------------------------------------------------
+
+In a virtualized environment, on Intel(R) Server Adapters that support SR-IOV,
+the virtual function (VF) may be subject to malicious behavior. Software-
+generated layer two frames, like IEEE 802.3x (link flow control), IEEE 802.1Qbb
+(priority based flow-control), and others of this type, are not expected and
+can throttle traffic between the host and the virtual switch, reducing
+performance. To resolve this issue, configure all SR-IOV enabled ports for
+VLAN tagging. This configuration allows unexpected, and potentially malicious,
+frames to be dropped.
+
+
+Configuring VLAN tagging on SR-IOV enabled adapter ports
+--------------------------------------------------------
+
+To configure VLAN tagging for the ports on an SR-IOV enabled adapter,
+use the following command. The VLAN configuration should be done 
+before the VF driver is loaded or the VM is booted.
+
+$ ip link set dev <PF netdev id> vf <id> vlan <vlan id>
+
+For example, the following instructions will configure PF eth0 and 
+the first VF on VLAN 10.
+$ ip link set dev eth0 vf 0 vlan 10
+.
+
+Intel(R) Ethernet Flow Director
+-------------------------------
 The Flow Director performs the following tasks:
 
   - Directs receive packets according to their flows to different queues.
@@ -241,41 +288,41 @@ ethtool commands:
 
        # ethtool -K ethX ntuple <on|off>
 
-    When disabling ntuple filters all the user programed filters are flushed
-    from the driver cache and hardware. Filters must be re-added if they are
-    needed when ntuple is re-enabled.
+       When disabling ntuple filters all the user programed filters are flushed
+       from the driver cache and hardware. Filters must be re-added if they are
+       needed when ntuple is re-enabled.
 
   - To add a filter that directs packet to queue 2, use -U or -N switch
 
-        # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
-          192.168.10.2 src-port 2000 dst-port 2001  action 2 [loc 1]
+       # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
+       192.168.10.2 src-port 2000 dst-port 2001 action 2 [loc 1]
 
   - To see the list of filters currently present
        # ethtool <-u|-n> ethX
 
-  Application Targeted Routing (ATR) Perfect Filters
-  --------------------------------------------------
-  ATR is enabled by default when the kernel is in multiple transmit queue mode.
-  An ATR flow director filter rule is added when a TCP-IP flow starts and is
-  deleted when the flow ends. When a TCP-IP Flow Director rule is added from
-  ethtool (Sideband filter), ATR is turned off by the driver. To re-enable ATR,
-  the sideband can be disabled with the ethtool -K option. If sideband is
-  re-enabled after ATR is re-enabled, ATR remains enabled until a TCP-IP flow
-  is added. When all TCP-IP sideband rules are deleted, ATR is automatically
-  re-enabled.
-
-  Packets that match the ATR rules are counted in fdir_atr_match stats in
-  ethtool, which also can be used to verify whether ATR rules still exist.
-
-  Sideband Perfect Filters
-  ------------------------
-  Sideband Perfect Filters is an interface for loading the filter table that
-  funnels all flow into queue_0 unless an alternative queue is specified
-  using "action." If action is used, any flow that matches the filter criteria
-  will be directed to the appropriate queue. Rules may be deleted from the
-  table. This is done via
-
-        ethtool -U ethX delete N
+Application Targeted Routing (ATR) Perfect Filters
+--------------------------------------------------
+ATR is enabled by default when the kernel is in multiple transmit queue mode.
+An ATR flow director filter rule is added when a TCP-IP flow starts and is
+deleted when the flow ends. When a TCP-IP Flow Director rule is added from
+ethtool (Sideband filter), ATR is turned off by the driver. To re-enable ATR,
+the sideband can be disabled with the ethtool -K option. If sideband is
+re-enabled after ATR is re-enabled, ATR remains enabled until a TCP-IP flow
+is added. When all TCP-IP sideband rules are deleted, ATR is automatically
+re-enabled.
+
+Packets that match the ATR rules are counted in fdir_atr_match stats in
+ethtool, which also can be used to verify whether ATR rules still exist.
+
+Sideband Perfect Filters
+------------------------
+Sideband Perfect Filters is an interface for loading the filter table that
+funnels all flow into queue_0 unless an alternative queue is specified
+using "action." If action is used, any flow that matches the filter criteria
+will be directed to the appropriate queue. Rules may be deleted from the
+table. This is done via
+
+  ethtool -U ethX delete N
 
   where N is the rule number to be deleted, as specified in the loc value in
   the filter add command.
@@ -286,34 +333,35 @@ ethtool commands:
   In addition, rx-N.rx_packets shows the number of packets processed by the
   Nth queue.
 
-  NOTES:
+NOTES:
+Receive Packet Steering (RPS) and Receive Flow Steering (RFS) are not compatible
+with Flow Director. If Flow Director is enabled, these will be disabled.
 
-    - Receive Packet Steering (RPS) and Receive Flow Steering (RFS) are not
-      compatible with Flow Director. If Flow Director is enabled, these will
-      be disabled.
-    - The VLAN field for Flow Director is not explicitly  supported in the i40e
-      driver.
+The VLAN field for Flow Director is not explicitly supported in the i40e
+driver.
 
-  When filter rules are added from Sideband or ATR and the Flow Director filter
-  table is full, the ATR rule is turned off by the driver. Subsequently, the
-  Sideband filter rule is then turned off. When space becomes available in the
-  filter table through filter rule deletion (i.e., an ATR rule or Sideband rule
-  is deleted), the Sideband and ATR rule additions are turned back on.
+When filter rules are added from Sideband or ATR and the Flow Director filter
+table is full, the ATR rule is turned off by the driver. Subsequently, the
+Sideband filter rule is then turned off. When space becomes available in the
+filter table through filter rule deletion (i.e., an ATR rule or Sideband rule
+is deleted), the Sideband and ATR rule additions are turned back on.
 
-  Occasionally, when the filter table is full, you will notice HW errors when
-  you try to add new rules. The i40e driver will call for a filter flush and
-  sideband filter list replay. This will help flush any stale ATR rules and 
-  create space.
+Occasionally, when the filter table is full, you will notice HW errors when
+you try to add new rules. The i40e driver will call for a filter flush and
+sideband filter list replay. This will help flush any stale ATR rules and
+create space.
 
 
-==============================================================================
+================================================================================
 
 
-Additional Features & Configurations
-------------------------------------
+Additional Features and Configurations
+-------------------------------------------
+
 
 Configuring the Driver on Different Distributions
 -------------------------------------------------
+
 Configuring a network driver to load properly when the system is started is
 distribution dependent. Typically, the configuration process involves adding
 an alias line to /etc/modules.conf or /etc/modprobe.conf as well as editing
@@ -321,184 +369,207 @@ other system startup scripts and/or configuration files. Many popular Linux
 distributions ship with tools to make these changes for you. To learn the
 proper way to configure a network device for your system, refer to your
 distribution documentation. If during this process you are asked for the
-driver or module name, the name for the Linux Base Driver for the
-Intel(R) 40 Gigabit PCI Express Family of Adapters is i40e.
+driver or module name, the name for the Base Driver is i40e.
+
 
 Viewing Link Messages
 ---------------------
+
 Link messages will not be displayed to the console if the distribution is
 restricting system messages. In order to see network driver link messages on
 your console, set dmesg to eight by entering the following:
-
-    # dmesg -n 8
+dmesg -n 8
 
 NOTE: This setting is not saved across reboots.
 
+
 Jumbo Frames
 ------------
-Jumbo Frames support is enabled by changing the MTU to a value larger than
-the default of 1500 bytes. The maximum value for the MTU is 9706. Use the
-ifconfig command to increase the MTU size. For example, enter the following
-where <x> is the interface number:
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit
+(MTU) to a value larger than the default value of 1500.
 
-    # ifconfig ethx mtu 9000 up
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <x> is the interface number:
+
+   ifconfig eth<x> mtu 9000 up
 
 This setting is not saved across reboots. The setting change can be made
-permanent by adding MTU = 9000 to the file
+permanent by adding 'MTU=9000' to the file:
 /etc/sysconfig/network-scripts/ifcfg-eth<x> for RHEL or to the file
 /etc/sysconfig/network/<config_file> for SLES.
 
-The maximum MTU setting for Jumbo Frames is 9706. This value coincides with
-the maximum Jumbo Frames size of 9728. This driver will attempt to use
-multiple page sized buffers to receive each jumbo packet. This should help
-avoid buffer starvation issues when allocating receive packets.
+NOTES:
+- The maximum MTU setting for Jumbo Frames is 9706. This value coincides
+  with the maximum Jumbo Frames size of 9728 bytes.
+- This driver will attempt to use multiple page sized buffers to receive
+  each jumbo packet. This should help to avoid buffer starvation issues
+  when allocating receive packets.
+
 
 ethtool
 -------
-The driver uses the ethtool interface for driver configuration and
+The driver utilizes the ethtool interface for driver configuration and
 diagnostics, as well as displaying statistical information. The latest
-ethtool version is required for this functionality.
+ethtool version is required for this functionality. Download it at
+http://ftp.kernel.org/pub/software/network/ethtool/
 
-The latest release of ethtool can be found at:
-http://sourceforge.net/projects/gkernel
+Supported ethtool Commands and Options
+--------------------------------------
+-n --show-nfc
+  Retrieves the receive network flow classification configurations.
 
-  Supported ethtool Commands and Options
-  --------------------------------------
-  -n --show-nfc
-    Retrieves the receive network flow classification configurations.
+rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6
+  Retrieves the hash options for the specified network traffic type.
 
-  rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6
-    Retrieves the hash options for the specified network traffic type.
+-N --config-nfc
+  Configures the receive network flow classification.
 
-  -N --config-nfc
-    Configures the receive network flow classification.
+rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 m|v|t|s|d|f|n|r...
+  Configures the hash options for the specified network traffic type.
 
-  rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 m|v|t|s|d|f|n|r...
-    Configures the hash options for the specified network traffic type.
+  udp4 UDP over IPv4
+  udp6 UDP over IPv6
 
-    udp4 UDP over IPv4
-    udp6 UDP over IPv6
+  f Hash on bytes 0 and 1 of the Layer 4 header of the rx packet.
+  n Hash on bytes 2 and 3 of the Layer 4 header of the rx packet.
 
-    f Hash on bytes 0 and 1 of the Layer 4 header of the rx packet.
-    n Hash on bytes 2 and 3 of the Layer 4 header of the rx packet.
 
 NAPI
 ----
 NAPI (Rx polling mode) is supported in the i40e driver.
+For more information on NAPI, see
+ftp://robur.slu.se/pub/Linux/net-development/NAPI/usenix-paper.tgz.
 
-See ftp://robur.slu.se/pub/Linux/net-development/NAPI/usenix-paper.tgz for
-more information on NAPI.
 
 Flow Control
 ------------
-Flow control is disabled by default. To enable it, use ethtool:
 
-    # ethtool -A eth? autoneg off rx on tx on
+Ethernet Flow Control (IEEE 802.3x) can be configured with ethtool to enable
+receiving and transmitting pause frames for i40e. When transmit is enabled,
+pause frames are generated when the receive packet buffer crosses a predefined
+threshold. When receive is enabled, the transmit unit will halt for the time
+delay specified when a pause frame is received. 
+
+Flow Control is disabled by default.
+
+Use ethtool to change the flow control settings.
+
+ethtool:
+ethtool -A eth? autoneg off rx on tx on
 
 NOTE: You must have a flow control capable link partner.
 
-MAC and VLAN Anti-spoofing Feature
+
+MAC and VLAN anti-spoofing feature
 ----------------------------------
-When a malicious driver attempts to send a spoofed packet, it is dropped by
-the hardware and not transmitted. 
 
+When a malicious driver attempts to send a spoofed packet, it is dropped by
+the hardware and not transmitted.
 NOTE: This feature can be disabled for a specific Virtual Function (VF).
-  ip link set <pf dev> vf <vf id> spoofchk {off|on}
+ip link set <pf dev> vf <vf id> spoofchk {off|on}
+
 
 Support for UDP RSS
 -------------------
-This feature adds an on/off switch for hashing over certain flow types. The
-default setting is enabled.
 
-IEEE 1588 PTP
--------------
+This feature adds an ON/OFF switch for hashing over certain flow types. Only
+UDP can be turned on. The default setting is enabled.
+
+
+IEEE 1588 Precision Time Protocol (PTP) Hardware Clock (PHC)
+------------------------------------------------------------
+
 Precision Time Protocol (PTP) is used to synchronize clocks in a computer
 network and is supported in the i40e driver.
 
-The I40E_PTP is a compile time flag. The user may enable PTP at compile time
-by adding CFLAGS_EXTRA="-DI40E_PTP" to the make file when it is being
-compiled:
+I40E_PTP is a compile time flag. The user can enable it at compile time to add
+support for PTP from the driver. The flag is used by editing the make file
+as follows when it is being compiled:
+
+>make CFLAGS_EXTRA="-DI40E_PTP" install
 
-  make CFLAGS_EXTRA="-DI40E_PTP" install
 
 VXLAN Overlay HW Offloading
 ---------------------------
+
 VXLAN Overlay HW Offloading is enabled by default. The i40e Linux driver
 features VXLAN Overlay HW Offloading support. To view and configure
-VXLAN on a VXLAN-overlay offload enabled device,  use the following
+VXLAN on a VXLAN-overlay offload enabled device, use the following
 commands:
 
   # ethtool -k ethX
-    (This command displays the offloads and their current state.)
+   (This command displays the offloads and their current state.)
   # ethtool -K ethX tx-udp_tnl-segmentation [off|on]
-    (This enables/disables VXLAN support in the driver.)
+   (This enables/disables VXLAN support in the driver.)
 
 For more information on configuring your network for VXLAN overlay support,
 refer to the Intel Technical Brief, "Creating Overlay Networks Using Intel
 Ethernet Converged Network Adapters" (Intel Networking Division, August 2013):
 
-  http://www.intel.com/content/dam/www/public/us/en/documents/technology-briefs/
-  overlay-networks-using-converged-network-adapters-brief.pdf
+http://www.intel.com/content/dam/www/public/us/en/documents/technology-briefs/
+overlay-networks-using-converged-network-adapters-brief.pdf
+
 
 Multiple Functions per Port
 ---------------------------
+
 On X710/XL710 based adapters that support it, you can set up multiple functions
-on each physical port. You configure these functions through the System 
-Setup/BIOS. 
+on each physical port. You configure these functions through the System
+Setup/BIOS.
 
-Minimum TX Bandwidth is the guaranteed minimum data transmission bandwidth, as 
-a percentage of the full physical port link speed, that the partition will 
-receive. The bandwidth the partition is awarded will never fall below the level 
+Minimum TX Bandwidth is the guaranteed minimum data transmission bandwidth, as
+a percentage of the full physical port link speed, that the partition will
+receive. The bandwidth the partition is awarded will never fall below the level
 you specify here.
 
 The range for the minimum bandwidth values is:
 1 to ((100 minus # of partitions on the physical port) plus 1)
-For example, if a physical port has 4 partitions, the range would be 
+For example, if a physical port has 4 partitions, the range would be
 1 to ((100 - 4) + 1 = 97)
 
-The Maximum Bandwidth percentage represents the maximum transmit 
-bandwidth allocated to the partition as a percentage of the full physical port 
-link speed. The accepted range of values is 1-100. The value can be used as a 
-limiter, should you chose that any one particular function not be able to 
-consume 100% of a port's bandwidth (should it be available). The sum of 
-all the values for Maximum Bandwidth is not restricted, because no more than 
+The Maximum Bandwidth percentage represents the maximum transmit
+bandwidth allocated to the partition as a percentage of the full physical port
+link speed. The accepted range of values is 1-100. The value can be used as a
+limiter, should you choose that any one particular function not be able to
+consume 100% of a port's bandwidth (should it be available). The sum of
+all the values for Maximum Bandwidth is not restricted, because no more than
 100% of a port's bandwidth can ever be used.
 
-Once the initial configuration is complete, you can set different 
+Once the initial configuration is complete, you can set different
 bandwidth allocations on each function as follows:
- 1. Make a new directory named /config
- 2. edit etc/fstab to include:
-    
-     configfs  /config  configfs  defaults
-
- 3. Mount /config
- 4. Load (or reload) the i40e driver
- 5. Make a new directory under config for each partition upon which you wish
-    to configure the bandwidth.
- 6. Three files will appear under the config/partition directory:
-    - max_bw
-    - min_bw
-    - commit
-    read from max_bw to get display the current maximum bandwidth setting.
-    write to max_bw to set the maximum bandwidth for this function.
-    read from min_bw to display the current minimum bandwidth setting.
-    Write to min_bw to set the minimum bandwidth for this function.
-    Write a '1' to commit to save your changes.
+1. Make a new directory named /config
+2. edit etc/fstab to include:
+
+       configfs /config configfs defaults
+
+3. Mount /config
+4. Load (or reload) the i40e driver
+5. Make a new directory under config for each partition upon which you wish
+   to configure the bandwidth.
+6. Three files will appear under the config/partition directory:
+   - max_bw
+   - min_bw
+   - commit
+   read from max_bw to display the current maximum bandwidth setting.
+   write to max_bw to set the maximum bandwidth for this function.
+   read from min_bw to display the current minimum bandwidth setting.
+   Write to min_bw to set the minimum bandwidth for this function.
+   Write a '1' to commit to save your changes.
 
 Notes: -commit is write only. Attempting to read it will result in an
-        error. 
-       -Writing to commit is only supported on the first function of
-        a given port. Writing to a subsequent function will result in an
-        error.
-       -Oversubscribing the minimum bandwidth is not supported. The underlaying
-        device's NVM will set the minimum bandwidth to supported values in an
-        indeterminate manner. Remove all of the directories under config and
-        reload them to see what the actual values are.
-       -To unload the driver you must first remove the directories created in
-        step 5, above.
+        error.
+       -Writing to commit is only supported on the first function of
+        a given port. Writing to a subsequent function will result in an
+        error.
+       -Oversubscribing the minimum bandwidth is not supported. The underlying
+        device's NVM will set the minimum bandwidth to supported values in an
+        indeterminate manner. Remove all of the directories under config and
+        reload them to see what the actual values are.
+       -To unload the driver you must first remove the directories created in
+        step 5, above.
 
 Example of Setting the minimum and maximum bandwidth (assume there are four
-function on the port eth6-eth9, and that eth6 is the first function on 
+functions on the port eth6-eth9, and that eth6 is the first function on
 the port):
 
  # mkdir /config/eth6
@@ -517,12 +588,13 @@ the port):
 
  # echo 1 > /config/eth6/commit
 
+
 Data Center Bridging (DCB)
 --------------------------
 DCB is a configuration Quality of Service implementation in hardware.
-It uses the VLAN priority tag (802.1p) to filter traffic.  That means
+It uses the VLAN priority tag (802.1p) to filter traffic. That means
 that there are 8 different priorities that traffic can be filtered into.
-It also enables priority flow control (802.1Qbb) which can limit or 
+It also enables priority flow control (802.1Qbb) which can limit or
 eliminate the number of dropped packets during network stress. Bandwidth
 can be allocated to each of these priorities, which is enforced at the
 hardware level (802.1Qaz).
@@ -536,208 +608,280 @@ The i40e driver implements the DCB netlink interface layer to allow
 user-space to communicate with the driver and query DCB configuration for
 the port.
 
-===============================================================================
 
+Interrupt Rate Limiting
+-----------------------
+
+The Intel(R) Ethernet Controller XL710 family supports an interrupt rate
+limiting mechanism. The user can control, via ethtool, the number of
+microseconds between interrupts.
+
+Syntax:
+# ethtool -C ethX rx-usecs-high N
+
+Valid Range: 0-235 (0=no limit)
+
+The range of 0-235 microseconds provides an effective range of 4,310 to
+250,000 interrupts per second. The value of rx-usecs-high can be set
+independently of rx-usecs and tx-usecs in the same ethtool command, and
+is also independent of the adaptive interrupt moderation algorithm. The
+underlying hardware supports granularity in 4-microsecond intervals, so
+adjacent values may result in the same interrupt rate.
+
+One possible use case is the following:
+# ethtool -C ethX adaptive-rx off adaptive-tx off rx-usecs-high 20 rx-usecs 5
+tx-usecs 5
+
+The above command would disable adaptive interrupt moderation, and allow a
+maximum of 5 microseconds before indicating a receive or transmit was complete.
+However, instead of resulting in as many as 200,000 interrupts per second, it
+limits total interrupts per second to 50,000 via the rx-usecs-high parameter.
+
+
+Performance Optimization:
+-------------------------
+
+Driver defaults are meant to fit a wide variety of workloads, but if further
+optimization is required we recommend experimenting with the following settings.
+
+Pin the adapter's IRQs to specific cores by disabling the irqbalance service
+and using the included set_irq_affinity script.
+The following settings will distribute the IRQs across all the cores evenly:
+  # scripts/set_irq_affinity -x all <interface1> , [ <interface2>, ... ]
+The following settings will distribute the IRQs across all the cores that are
+local to the adapter (same NUMA node):
+  # scripts/set_irq_affinity -x local <interface1> ,[ <interface2>, ... ]
+Please see the script's help text for further options.
+
+For very CPU intensive workloads, we recommend pinning the IRQs to all cores.
+For IP Forwarding: Disable Adaptive ITR and lower rx and tx interrupts per
+queue using ethtool.
+# ethtool <interface> adaptive-rx off adaptive-tx off rx-usecs 125 tx-usecs 125
+Setting rx-usecs and tx-usecs to 125 will limit interrupts to about 8000
+interrupts per second per queue.
+
+For lower CPU utilization: Disable Adaptive ITR and lower rx and tx interrupts
+per queue using ethtool.
+# ethtool <interface> adaptive-rx off adaptive-tx off rx-usecs 250 tx-usecs 250
+Setting rx-usecs and tx-usecs to 250 will limit interrupts to about 4000
+interrupts per second per queue.
+
+For lower latency: Disable Adaptive ITR and ITR by setting rx and tx to 0
+using ethtool.
+# ethtool <interface> adaptive-rx off adaptive-tx off rx-usecs 0 tx-usecs 0
+
+
+================================================================================
+
+
+Known Issues/Troubleshooting
+----------------------------
 
-Known Issues
-------------
 
 Incomplete messages in the system log
 -------------------------------------
+
 The NVMUpdate utility may write several incomplete messages in the system log.
 These messages take the form:
   in the driver Pci Ex config function byte index 114
   in the driver Pci Ex config function byte index 115
 These messages can be ignored.
 
+
 Bad checksum counter incorrectly increments when using VxLAN
 ------------------------------------------------------------
+
 When passing non-UDP traffic over a VxLAN interface, the port.rx_csum_bad
 counter increments for the packets.
 
+
 Virtual machine does not get link
 ---------------------------------
+
 If the virtual machine has more than one virtual port assigned to it, and those
 virtual ports are bound to different physical ports, you may not get link on all
 of the virtual ports. The following command may work around the issue:
-  ethtool \96r <PF>
+ethtool -r <PF>
 Where <PF> is the PF interface in the host, for example: p5p1. You may need to
 run the command more than once to get link on all virtual ports.
 
+
 MAC address of Virtual Function changes unexpectedly
 ----------------------------------------------------
+
 If a Virtual Function's MAC address is not assigned in the host, then the
 VF (virtual function) driver will use a random MAC address. This random MAC
 address may change each time the VF driver is reloaded. You can assign a
 static MAC address in the host machine. This static MAC address will survive
 a VF driver reload.
 
+
 Enabling TSO may cause data integrity issues
 --------------------------------------------
-Enabling TSO on kernel 3.14 or newer may cause data integrity issues. 
+
+Enabling TSO on kernel 3.14 or newer may cause data integrity issues.
 Kernel 3.10 and older do not exhibit this behavior.
 
 
 Changing the number of Rx or Tx queues with ethtool -L may cause a kernel panic
 -------------------------------------------------------------------------------
-Changing the number of Rx or Tx queues with ethtool -L while traffic is flowing 
+
+Changing the number of Rx or Tx queues with ethtool -L while traffic is flowing
 and the interface is up may cause a kernel panic. Bring the interface down first
 to avoid the issue. For example:
   ip link set ethx down
   ethtool -L ethx combined 4
 
+
 Adding a Flow Director Sideband rule fails incorrectly
 ------------------------------------------------------
+
 If you try to add a Flow Director rule when no more sideband rule space is
 available, i40e logs an error that the rule could not be added, but ethtool
 returns success. You can remove rules to free up space. In addition, remove
 the rule that failed. This will evict it from the driver's cache.
 
+
 Flow Director Sideband Logic adds duplicate filter
 --------------------------------------------------
+
 The Flow Director Sideband Logic adds a duplicate filter in the software filter
 list if the location is not specified or the specified location differs from
 the previous location but has the same filter criteria. In this case, the
 second of the two filters that appear is the valid one in hardware and it
 decides the filter action.
 
+
 Multiple Interfaces on Same Ethernet Broadcast Network
 ------------------------------------------------------
+
 Due to the default ARP behavior on Linux, it is not possible to have one
 system on two IP networks in the same Ethernet broadcast domain
 (non-partitioned switch) behave as expected. All Ethernet interfaces will
 respond to IP traffic for any IP address assigned to the system. This results
 in unbalanced receive traffic.
 
-If you have multiple interfaces in a server, turn on ARP filtering by
-entering: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
+If you have multiple interfaces in a server, either turn on ARP filtering by
+entering:
+echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
 
 This only works if your kernel's version is higher than 2.4.5.
 
+
 NOTE: This setting is not saved across reboots. The configuration change can
-be made permanent by adding the following line to the /etc/sysctl.conf file:
+be made permanent by adding the following line to the file /etc/sysctl.conf:
+net.ipv4.conf.all.arp_filter = 1
 
-    net.ipv4.conf.all.arp_filter = 1
+Another alternative is to install the interfaces in separate broadcast domains
+(either in different switches or in a switch partitioned to VLANs).
 
-The change can also be made permanent by installing the interfaces in
-separate broadcast domains (either in different switches or in a switch
-partitioned to VLANs).
 
 UDP Stress Test Dropped Packet Issue
 ------------------------------------
-Under small packets UDP stress test with the i40e driver, the Linux system may
-drop UDP packets due to the fullness of socket buffers. The driver Flow
-Control variables may be changed to the minimum value for controlling packet
-reception, or the kernel's default buffer sizes can be increased for UDP by
-changing the values in /proc/sys/net/core/rmem_default and rmem_max.
 
-Unplugging network cable while ethtool -p is running
+Under small packet UDP stress with the i40e driver, the system may
+drop UDP packets due to socket buffers being full. Setting the driver Flow
+Control variables to the minimum may resolve the issue. You may also try
+increasing the kernel's default buffer sizes by changing the values in
+
+  /proc/sys/net/core/rmem_default and rmem_max
+
+
+Unplugging Network Cable While ethtool -p is Running
 ----------------------------------------------------
-In kernel versions 2.6.32 and later, unplugging the network cable while
-ethtool -p is running will cause the system to become unresponsive to keyboard
-commands, except for control-alt-delete. Restarting the system appears to be
-the only remedy.
 
-Rx page allocation errors
+In kernel versions 2.6.32 and newer, unplugging the network cable while
+ethtool -p is running will cause the system to become unresponsive to
+keyboard commands, except for control-alt-delete. Restarting the system
+appears to be the only remedy.
+
+
+Rx Page Allocation Errors
 -------------------------
-"order:0" errors may occur under stress with kernels 2.6.25 and newer. This is
-caused by the way the Linux kernel reports this stressed condition.
+
+'Page allocation failure. order:0' errors may occur under stress with kernels
+2.6.25 and newer. This is caused by the way the Linux kernel reports this
+stressed condition.
+
+
 
 Disable GRO when routing/bridging
 ---------------------------------
-Due to a known kernel issue, GRO must be turned off when routing/bridging.
-GRO can be disabled using ethtool:
 
-  # ethtool -K ethX gro off
+Due to a known kernel issue, GRO must be turned off when routing/bridging. GRO
+can be turned off via ethtool.
+ethtool -K ethX gro off
+
+where ethX is the ethernet interface being modified.
 
-where ethX is the Ethernet interface to be modified.
 
 Lower than expected performance
 -------------------------------
+
 Some PCIe x8 slots are actually configured as x4 slots. These slots have
 insufficient bandwidth for full line rate with dual port and quad port
 devices. In addition, if you put a PCIe Generation 3-capable adapter
 into a PCIe Generation 2 slot, you cannot get full bandwidth. The driver
 detects this situation and writes the following message in the system log:
 
-  "PCI-Express bandwidth available for this card is not sufficient for optimal
-  performance. For optimal performance a x8 PCI-Express slot is required."
+"PCI-Express bandwidth available for this card is not sufficient for optimal
+performance. For optimal performance a x8 PCI-Express slot is required."
+
+If this error occurs, moving your adapter to a true PCIe Generation 3 x8 slot
+will resolve the issue.
 
-If this error occurs, moving your adapter to a true PCIe Generation 3 x8 slot 
-will resolve the issue.
 
 ethtool may incorrectly display SFP+ fiber module as direct attached cable
 --------------------------------------------------------------------------
+
 Due to kernel limitations, port type can only be correctly displayed on kernel
-2.6.33 or newer.
+2.6.33 or greater.
+
 
 Running ethtool -t ethX command causes break between PF and test client
 -----------------------------------------------------------------------
+
 When there are active VFs, "ethtool -t" performs a full diagnostic. In the
 process, it resets itself and all attached VFs. The VF drivers encounter a
 disruption, but are able to recover.
 
+
 Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS
 under Linux KVM
 ------------------------------------------------------------------------
+
 KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
 includes traditional PCIe devices, as well as SR-IOV-capable devices using
 Intel XL710-based controllers.
 
+
 Unable to obtain DHCP lease on boot with RedHat
 -----------------------------------------------
+
 For configurations where the auto-negotiation process takes more than 5
 seconds, the boot script may fail with the following message:
-
-  "ethX: failed. No link present. Check cable?"
+"ethX: failed. No link present. Check cable?"
 
 If this error appears even though the presence of a link can be confirmed
-using ethtool ethX, try setting LINKDELAY to 5 seconds (LINKDELAY=5) in
+using ethtool ethX, try setting "LINKDELAY=5" in
 /etc/sysconfig/network-scripts/ifcfg-ethX.
 
-NOTE: Link time can take up to 30 seconds. The LINKDELAY value should be
-adjusted accordingly.
+NOTE: Link time can take up to 30 seconds. Adjust LINKDELAY value accordingly.
 
 Alternatively, NetworkManager can be used to configure the interfaces, which
 avoids the set timeout. For configuration instructions of NetworkManager
 refer to the documentation provided by your distribution.
 
-Loading i40e driver in 3.2.x and newer kernels displays kernel tained message
------------------------------------------------------------------------------
-Due to recent kernel changes, loading an out of tree driver causes the kernel
-to be tainted.
-
-Software bridging does not work with SR-IOV Virtual Functions
--------------------------------------------------------------
-SR-IOV Virtual Functions are unable to send or receive traffic between VMs
-using emulated connections on a Linux Software bridge and connections that use
-SR-IOV VFs.
-
-Packet Filter cannot forward packets to emulated device when it is in bridge 
-mode and SR-IOV is enabled or for NPAR enabled adapter
-----------------------------------------------------------------------------
-In SR-IOV enabled or NPAR enabled adapters, Physical Function (PF) does not 
-work in bridge mode. When a bridge is created on the PF device, an emulation 
-device in the Virtual Machine (VM) connects to this bridge cannot receive any 
-unicast packets. 
 
-To avoid this from occurring, for each emulated device (software Virtual 
-Station Interface [VSI]) added to the bridge the MAC address of the emulated
-device (software Virtual Ethernet Bridge [VEB] VSI) needs to be added manually 
-to the forwarding database (FDB) filter table using the iproute2 package 
-bridge tool. This can be done by executing the following command:
+Loading i40e driver in 3.2.x and newer kernels displays kernel tainted message
+------------------------------------------------------------------------------
 
-  # bridge fdb add <MAC ADDR> dev <PF device interface> self permanent
-
-The FDB entry when the emulated device is no longer in use or the guest to 
-which it is assigned is moved to a different host must be deleted using this
-command:
-
-  # bridge fdb del <MAC ADDR> dev <PF device interface>
+Due to recent kernel changes, loading an out of tree driver causes the kernel
+to be tainted.
 
 
+================================================================================
 
-===============================================================================
 
 Support
 -------
@@ -745,18 +889,17 @@ For general information, go to the Intel support website at:
 www.intel.com/support/
 
 or the Intel Wired Networking project hosted by Sourceforge at:
-http://sourceforge.net/projects/e1000
-
-If an issue is identified with the released source code on the supported
+http://sourceforge.net/projects/i40e
+If an issue is identified with the released source code on a supported
 kernel with a supported adapter, email the specific information related to the
-issue to e1000-devel@lists.sf.net.
+issue to i40e-devel@lists.sf.net.
+
+
+================================================================================
 
-===============================================================================
 
 License
 -------
-Intel i40e  Linux driver.
-Copyright(c) 2014 - 2015 Intel Corporation.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms and conditions of the GNU General Public License, version 2, as
@@ -773,10 +916,16 @@ St - Fifth Floor, Boston, MA 02110-1301 USA.
 The full GNU General Public License is included in this distribution in the
 file called "COPYING".
 
-===============================================================================
+Intel(R) XL710/X710 Network Driver
+Intel(R) XL710/X710 Virtual Function Network Driver
+Copyright(c) 2014-2015 Intel Corporation.
+================================================================================
+
+
 
 Trademarks
 ----------
+
 Intel, Itanium, and Pentium are trademarks or registered trademarks of Intel
 Corporation or its subsidiaries in the United States and other countries.
 
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/SUMS b/i40e-dkms-1.3.47/i40e-1.3.47/SUMS
new file mode 100644 (file)
index 0000000..1b96aea
--- /dev/null
@@ -0,0 +1,47 @@
+46789     4 i40e-1.3.47/pci.updates
+63894     4 i40e-1.3.47/src/i40e/i40e_helper.h
+44588     1 i40e-1.3.47/src/i40e/Module.supported
+13919    68 i40e-1.3.47/src/i40e/i40e_adminq_cmd.h
+07159   138 i40e-1.3.47/src/i40e/kcompat.h
+19688    82 i40e-1.3.47/src/i40e/i40e_debugfs.c
+05671    86 i40e-1.3.47/src/i40e/i40e_txrx.c
+29128     6 i40e-1.3.47/src/i40e/i40e_lan_hmc.h
+33481    42 i40e-1.3.47/src/i40e/i40e_nvm.c
+04585    30 i40e-1.3.47/src/i40e/i40e_adminq.c
+33767     2 i40e-1.3.47/src/i40e/Kbuild
+07400     3 i40e-1.3.47/src/i40e/i40e_alloc.h
+30794    66 i40e-1.3.47/src/i40e/i40e_virtchnl_pf.c
+11873     7 i40e-1.3.47/src/i40e/i40e_dcb.h
+60649     2 i40e-1.3.47/src/i40e/i40e_diag.h
+29182    21 i40e-1.3.47/src/i40e/i40e_prototype.h
+29883    23 i40e-1.3.47/src/i40e/i40e_ptp.c
+36071    37 i40e-1.3.47/src/i40e/i40e_dcb.c
+49741     4 i40e-1.3.47/src/i40e/i40e_osdep.h
+00677     9 i40e-1.3.47/src/i40e/i40e_dcb_nl.c
+37534   320 i40e-1.3.47/src/i40e/i40e_main.c
+17556   221 i40e-1.3.47/src/i40e/i40e_register.h
+57597    52 i40e-1.3.47/src/i40e/kcompat.c
+52476     5 i40e-1.3.47/src/i40e/i40e_adminq.h
+50800     6 i40e-1.3.47/src/i40e/i40e_virtchnl_pf.h
+57192    47 i40e-1.3.47/src/i40e/i40e_fcoe.c
+42138     9 i40e-1.3.47/src/i40e/i40e_hmc.h
+30403    52 i40e-1.3.47/src/i40e/i40e_type.h
+64716   105 i40e-1.3.47/src/i40e/i40e_ethtool.c
+22745     4 i40e-1.3.47/src/i40e/i40e_status.h
+60864    13 i40e-1.3.47/src/i40e/i40e_virtchnl.h
+23673     6 i40e-1.3.47/src/i40e/i40e_diag.c
+30055     2 i40e-1.3.47/src/i40e/i40e_devids.h
+23231    11 i40e-1.3.47/src/i40e/i40e_hmc.c
+45761    11 i40e-1.3.47/src/i40e/i40e_configfs.c
+12717   164 i40e-1.3.47/src/i40e/i40e_common.c
+25879    42 i40e-1.3.47/src/i40e/i40e_lan_hmc.c
+65207     5 i40e-1.3.47/src/i40e/i40e_fcoe.h
+10333    31 i40e-1.3.47/src/i40e/i40e.h
+22557    13 i40e-1.3.47/src/i40e/i40e_txrx.h
+48687    11 i40e-1.3.47/src/Makefile
+09576     6 i40e-1.3.47/scripts/set_irq_affinity
+53852     2 i40e-1.3.47/scripts/dump_tables
+02733    18 i40e-1.3.47/COPYING
+52865    10 i40e-1.3.47/i40e.spec
+18539    35 i40e-1.3.47/README
+08612     3 i40e-1.3.47/i40e.7
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/dkms.conf b/i40e-dkms-1.3.47/i40e-1.3.47/dkms.conf
new file mode 100644 (file)
index 0000000..ee5f5c0
--- /dev/null
@@ -0,0 +1,8 @@
+MAKE[0]="make -C src/ KERNELDIR=/lib/modules/${kernelver}/build  BUILD_KERNEL=${kernelver}"
+CLEAN="make -C src/ clean"
+BUILT_MODULE_NAME[0]=i40e
+BUILT_MODULE_LOCATION[0]=src/
+DEST_MODULE_LOCATION[0]="/updates"
+PACKAGE_NAME=i40e-dkms
+PACKAGE_VERSION=1.3.47
+REMAKE_INITRD=yes
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/i40e.7 b/i40e-dkms-1.3.47/i40e-1.3.47/i40e.7
new file mode 100755 (executable)
index 0000000..deab376
--- /dev/null
@@ -0,0 +1,58 @@
+.\" LICENSE
+.\"
+.\" This software program is released under the terms of a license agreement between you ('Licensee') and Intel. Do not use or load this software or any associated materials (collectively, the 'Software') until you have carefully read the full terms and conditions of the LICENSE located in this software package. By loading or using the Software, you agree to the terms of this Agreement. If you do not agree with the terms of this Agreement, do not install or use the Software.
+.\"
+.\" * Other names and brands may be claimed as the property of others.
+.\"
+.
+.TH i40e 7 "February 06, 2015"
+.SH NAME
+i40e \- This file describes the Linux* Base Driver for the Intel Ethernet Controller XL710 Family of Controllers.
+.SH SYNOPSIS
+.PD 0.4v
+modprobe i40e [<option>=<VAL1>,<VAL2>,...]
+.PD 1v
+.SH DESCRIPTION
+This driver is intended for \fB2.6.32\fR and newer kernels. 
+This driver includes support for any 64 bit Linux supported system, 
+including Itanium(R)2, x86_64, PPC64, ARM, etc.
+.LP
+This driver is only supported as a loadable module at this time. Intel is
+not supplying patches against the kernel source to allow for static linking of
+the drivers.
+
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel adapter. All hardware requirements listed apply to
+use with Linux.
+.SH Jumbo Frames
+.LP
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit
+(MTU) to a value larger than the default value of 1500.
+
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <x> is the interface number:
+
+   ifconfig eth<x> mtu 9000 up
+
+.LP
+NOTES:
+- The maximum MTU setting for Jumbo Frames is 9706. This value coincides
+  with the maximum Jumbo Frames size of 9728 bytes.
+- This driver will attempt to use multiple page sized buffers to receive
+  each jumbo packet. This should help to avoid buffer starvation issues
+  when allocating receive packets.
+See the section "Jumbo Frames" in the Readme.
+.SH SUPPORT
+.LP
+For additional information regarding building and installation,
+see the
+README
+included with the driver.
+For general information, go to the Intel support website at:
+.B www.intel.com/support/
+.LP
+If an issue is identified with the released source code on a supported
+kernel with a supported adapter, email the specific information related to the
+issue to i40e-devel@lists.sf.net.
+.LP
similarity index 99%
rename from i40e-dkms-1.2.48/i40e-1.2.48/i40e.spec
rename to i40e-dkms-1.3.47/i40e-1.3.47/i40e.spec
index 68243b4b22ca6d5a1571f1e1e89102e496a489ba..f85a2a8ecd45c4ea2d724535105a4f85b945ce53 100644 (file)
@@ -1,6 +1,6 @@
 Name: i40e
 Summary: Intel(R) Ethernet Connection XL710 Linux Driver
-Version: 1.2.48
+Version: 1.3.47
 Release: 1
 Source: %{name}-%{version}.tar.gz
 Vendor: Intel Corporation
similarity index 60%
rename from i40e-dkms-1.2.48/i40e-1.2.48/pci.updates
rename to i40e-dkms-1.3.47/i40e-1.3.47/pci.updates
index a06ea949aef11abe505fa73aae6d0bfa80fc704b..e0ae83fea4785eff8053b2446466e6bf88e10299 100644 (file)
        1571  XL710/X710 Virtual Function
        1572  Ethernet Controller X710 for 10GbE SFP+
                1028 0000  Ethernet 10G X710 rNDC
-               1028 1f98  Ethernet 10G 4P X710-k bNDC
                1028 1f99  Ethernet 10G 4P X710/I350 rNDC
                1028 1f9c  Ethernet 10G 4P X710 SFP+ rNDC
-               103c 0000  HP Ethernet 10Gb 562SFP+ Adapter
-               103c 22fc  HP Ethernet 10Gb 2-port 562FLR-SFP+ Adapter
-               103c 22fd  HP Ethernet 10Gb 2-port 562SFP+ Adapter
+               103c 0000  HPE Ethernet 10Gb 562SFP+ Adapter
+               103c 22fc  HPE Ethernet 10Gb 2-port 562FLR-SFP+ Adapter
+               103c 22fd  HPE Ethernet 10Gb 2-port 562SFP+ Adapter
                1137 0000  Cisco(R) Ethernet Converged NIC X710-4
                1137 013b  Cisco(R) Ethernet Converged NIC X710-4
+               17aa 0000  Lenovo ThinkServer X710 AnyFabric for 10GbE SFP+
+               17aa 4001  Lenovo ThinkServer X710-4 AnyFabric for 10GbE SFP+
+               17aa 4002  Lenovo ThinkServer X710-2 AnyFabric for 10GbE SFP+
                8086 0000  Ethernet Converged Network Adapter X710
                8086 0001  Ethernet Converged Network Adapter X710-4
                8086 0002  Ethernet Converged Network Adapter X710-4
                8086 0006  Ethernet Converged Network Adapter X710
                8086 0007  Ethernet Converged Network Adapter X710-2
                8086 0008  Ethernet Converged Network Adapter X710-2
-               8086 0009  Ethernet Converged Network Adapter X710-4
-               8086 000a  Ethernet Converged Network Adapter X710-2
-               8086 4005  Ethernet Controller XL710 for 10 Gigabit SFP+
+               8086 0009  Ethernet Controller X710 for 10GbE SFP+ 
+               8086 000a  Ethernet Controller X710 for 10GbE SFP+ 
+               8086 4005  Ethernet Controller X710 for 10GbE SFP+
+               8086 4006  Ethernet Controller X710 for 10GbE SFP+ 
        1580  Ethernet Controller XL710 for 40GbE backplane
        1581  Ethernet Controller X710 for 10GbE backplane
                1028 0000  Ethernet 10G X710-k bNDC
                1028 1f98  Ethernet 10G 4P X710-k bNDC
                1028 1f9e  Ethernet 10G 2P X710-k bNDC
+               8086 0000  Ethernet Converged Network Adapter XL710-Q2
        1583  Ethernet Controller XL710 for 40GbE QSFP+
-               108e 0000  Oracle 10 Gb and 40 Gb Ethernet Adapter
-               108e 7b1b  Oracle 10 Gb and 40 Gb Ethernet Adapter
+               1028 0000  Ethernet 40G 2P XL710 QSFP+ rNDC
+               1028 1f9f  Ethernet 40G 2P XL710 QSFP+ rNDC
+               108e 0000  Oracle Quad 10Gb Ethernet Adapter
+               108e 7b1b  Oracle Quad 10Gb Ethernet Adapter
                1137 0000  Cisco(R) Ethernet Converged NIC XL710-Q2
                1137 013c  Cisco(R) Ethernet Converged NIC XL710-Q2
                8086 0000  Ethernet Converged Network Adapter XL710-Q2
                8086 0001  Ethernet Converged Network Adapter XL710-Q2
                8086 0002  Ethernet Converged Network Adapter XL710-Q2
                8086 0003  Ethernet I/O Module XL710-Q2
+               8086 0004  Ethernet Server Adapter XL710-Q2OCP
+               8086 0006  Ethernet Converged Network Adapter XL710-Q2
        1584  Ethernet Controller XL710 for 40GbE QSFP+
                8086 0000  Ethernet Converged Network Adapter XL710-Q1
                8086 0001  Ethernet Converged Network Adapter XL710-Q1
                8086 0002  Ethernet Converged Network Adapter XL710-Q1
                8086 0003  Ethernet I/O Module XL710-Q1
+               8086 0004  Ethernet Server Adapter XL710-Q1OCP
        1585  Ethernet Controller X710 for 10GbE QSFP+
        1586  Ethernet Controller X710 for 10GBASE-T
                108e 0000  Ethernet Controller X710 for 10GBASE-T
                108e 4857  Ethernet Controller X710 for 10GBASE-T
        1587  Ethernet Controller XL710 for 20GbE backplane
-               103c 0000  HP Flex-20 20Gb 2-port 660FLB Adapter
-               103c 22fe  HP Flex-20 20Gb 2-port 660FLB Adapter
+               103c 0000  HPE Flex-20 20Gb 2-port 660FLB Adapter
+               103c 22fe  HPE Flex-20 20Gb 2-port 660FLB Adapter
+       1588  Ethernet Controller XL710 for 20GbE backplane
+               103c 0000  HPE Flex-20 20Gb 2-port 660M Adapter
+               103c 22ff  HPE Flex-20 20Gb 2-port 660M Adapter
+       1589  Ethernet Controller X710/X557-AT 10GBASE-T
+               8086 0000  Ethernet Converged Network Adapter X710-T
+               8086 0001  Ethernet Converged Network Adapter X710-T4
+               8086 0002  Ethernet Converged Network Adapter X710-T4
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/scripts/dump_tables b/i40e-dkms-1.3.47/i40e-1.3.47/scripts/dump_tables
new file mode 100755 (executable)
index 0000000..87d510a
--- /dev/null
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Dump Tables script
+# Copyright (C) 2015 Intel Corporation
+#
+# This script is used to generate a dump of the hardware state for
+# sending to linux.nics@intel.com for debugging purposes.  This
+# script will generate a dump_tables.gz output file that can be
+# uploaded or emailed.
+
+# Usage: dump_tables eth1
+
+DEV=$1
+. /sys/class/net/$DEV/device/uevent
+# DRIVER=i40e
+# PCI_CLASS=20000
+# PCI_ID=8086:1583
+# PCI_SUBSYS_ID=8086:0002
+# PCI_SLOT_NAME=0000:06:00.0
+# MODALIAS=pci:v00008086d00001583sv00008086sd00000002bc02sc00i00
+
+if [ "$DEV" == "" ]; then
+       echo Usage: $0 "<i40e ethernet interface>"
+       exit -1
+fi
+
+if [ "$PCI_SLOT_NAME" == "" ]; then
+       echo kernel version `uname -r` is not supported, please report the bug at e1000.sourceforge.net
+       exit -2
+fi
+
+CLUSTER=1
+TABLE=0
+INDEX=0
+
+OUTFILE=`mktemp`
+TMPFILE=`mktemp`
+
+# check for the debugfs directory being mounted
+if [ -d "/sys/kernel/debug/i40e" ]; then
+       echo debugfs found
+else
+       echo -n "mounting debugfs as /sys/kernel/debug: "
+       mount -t debugfs none /sys/kernel/debug && echo Success || (echo Failure ; exit -3)
+fi
+
+dmesg -c > /dev/null
+until [ "$TABLE" == "0xff" ]; do
+       until [ "$INDEX" == "0xffffffff" ]; do
+               echo dump debug fwdata $CLUSTER $TABLE $INDEX > /sys/kernel/debug/i40e/$PCI_SLOT_NAME/command
+               # check output, exit if no good
+               dmesg | grep -q unknown && (echo error encountered, see log; exit -4)
+               # store it, without modification
+               dmesg >> $OUTFILE
+               # erase it and prepare for parse
+               dmesg -c > $TMPFILE
+               TABLE=`grep rlen $TMPFILE | sed -e 's/.*next_table=\(.*\) .*\$/\1/'`
+               INDEX=`grep rlen $TMPFILE | sed -e 's/.*next_index=\(.*\)\$/\1/'`
+               echo -n .
+       done
+       INDEX=0
+done
+
+gzip $OUTFILE
+cp $OUTFILE.gz dump_tables.gz
+
+rm $OUTFILE.gz
+rm $TMPFILE
+
+echo Please send the file dump_tables.gz to linux.nics@intel.com or your Intel Support representative.
old mode 100644 (file)
new mode 100755 (executable)
similarity index 84%
rename from i40e-dkms-1.2.48/i40e-1.2.48/scripts/set_irq_affinity
rename to i40e-dkms-1.3.47/i40e-1.3.47/scripts/set_irq_affinity
index 9670a69..b362357
@@ -1,28 +1,3 @@
-################################################################################
-#
-# Copyright(c) 2013 - 2015 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
-
 #!/bin/bash
 #
 # Copyright (c) 2014, Intel Corporation
@@ -167,6 +142,9 @@ setaff()
 
        irqs=$(grep "$queues" /proc/interrupts | cut -f1 -d:)
        [ -z "$irqs" ] && irqs=$(grep $IFACE /proc/interrupts | cut -f1 -d:)
+       [ -z "$irqs" ] && irqs=$(for i in `ls -Ux /sys/class/net/$IFACE/device/msi_irqs` ;\
+                                do grep "$i:.*TxRx" /proc/interrupts | grep -v fdir | cut -f 1 -d : ;\
+                                done)
        [ -z "$irqs" ] && echo "Error: Could not find interrupts for $IFACE"
 
        echo "IFACE CORE MASK -> FILE"
similarity index 79%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/Makefile
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/Makefile
index 95048687ec76efbf64e2414ab019ed29e68e4a60..1f20882980b88b4197a20741455851f9b1a86a63 100644 (file)
@@ -1,4 +1,27 @@
 ###########################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Driver
+# Copyright(c) 2013 - 2015 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+###########################################################################
 # Environment tests
 
 ifeq (,$(BUILD_KERNEL))
@@ -87,6 +110,25 @@ ifeq (,$(wildcard $(CONFIG_FILE)))
   $(error Linux kernel source not configured - missing autoconf.h)
 endif
 
+# Some helper functions for converting kernel version to version codes
+get_kver = $(or $(word ${2},$(subst ., ,${1})),0)
+get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \
+                       [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \
+                       [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \
+                       printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) )
+
+# Convert LINUX_VERSION into LINUX_VERSION_CODE
+ifneq (${LINUX_VERSION},)
+  LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3))
+endif
+
+# Honor LINUX_VERSION_CODE
+ifneq (${LINUX_VERSION_CODE},)
+  $(info Setting LINUX_VERSION_CODE to ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}))
+  KVER_CODE := ${LINUX_VERSION_CODE}
+  EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE}
+endif
+
 EXTRA_CFLAGS += $(CFLAGS_EXTRA)
 
 # get the kernel version - we use this to find the correct install path
@@ -100,13 +142,9 @@ ifneq ($(KSRC),$(shell readlink /lib/modules/$(KVER)/build))
 endif
 endif
 
-KVER_CODE := $(shell $(CC) $(EXTRA_CFLAGS) -E -dM $(VSP) 2> /dev/null |\
-       grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g')
-
-# Ubnntu kernel 3.13.0-30-generic does not have UTS_UBUNTU_RELEASE_ABI, but
-# already includes pkt_hash_types. Pass the UTS_UBUNTU_RELEASE_ABI to the compiler.
-ifeq ($(KVER),3.13.0-30-generic)
-    CFLAGS_EXTRA += -DUTS_UBUNTU_RELEASE_ABI=30
+ifeq (${KVER_CODE},)
+  KVER_CODE := $(shell $(CC) $(EXTRA_CFLAGS) -E -dM $(VSP) 2> /dev/null |\
+                 grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g')
 endif
 
 # set the install path before and after 3.2.0, and handle
@@ -218,7 +256,7 @@ install: $(MODULES) manfile
 # remove all old versions of the driver
        $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko))
        $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).gz))
-       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).xz))
+       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko.xz))
        $(foreach m, $(DRIVERS), $(shell \
                install -D -m 644 $(m).$(MANSECTION).gz $(INSTALL_MOD_PATH)$(MANDIR)/man$(MANSECTION)/$(m).$(MANSECTION).gz ; \
                install -D -m 644 $(m).ko $(INSTALL_MOD_PATH)$(INSTDIR)/$(m)/$(m).ko))
@@ -231,7 +269,7 @@ endif
 uninstall:
        $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko))
        $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).gz))
-       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).xz))
+       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko.xz))
        @-/sbin/depmod -a $(KVER)
        $(foreach m, $(DRIVERS), $(shell \
                if [ -e $(INSTALL_MOD_PATH)$(MANDIR)/man$(MANSECTION)/$(m).$(MANSECTION).gz ] ; then \
similarity index 80%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e.h
index a73f308933fa82f53d49ea7b8e1ef052e53aed65..46fa83b851a4bc48b6782f7ea22578e0e1f579b1 100644 (file)
@@ -85,7 +85,6 @@
 #define I40E_MAX_VEB          16
 
 #define I40E_MAX_NUM_DESCRIPTORS      4096
-#define I40E_MAX_REGISTER     0x800000
 #define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
 #define I40E_DEFAULT_NUM_DESCRIPTORS  512
 #define I40E_REQ_DESCRIPTOR_MULTIPLE  32
 #define I40E_MIN_MSIX                 2
 #define I40E_DEFAULT_NUM_VMDQ_VSI     8 /* max 256 VSIs */
 #define I40E_MIN_VSI_ALLOC            51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
-#define I40E_DEFAULT_QUEUES_PER_VMDQ  1 /* max 16 qps */
+#define i40e_default_queues_per_vmdq(pf)  1 /* max 16 qps */
 #define I40E_DEFAULT_QUEUES_PER_VF    4
 #define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */
-#define i40e_pf_get_max_q_per_tc(_hw)  64 /* should be a power of 2 */
+#define i40e_pf_get_max_q_per_tc(pf)  64 /* should be a power of 2 */
 #define I40E_FDIR_RING                0
 #define I40E_FDIR_RING_COUNT          32
 #ifdef I40E_FCOE
 #endif /* I40E_FCOE */
 #define I40E_MAX_AQ_BUF_SIZE          4096
 #define I40E_AQ_LEN                   256
-#define I40E_AQ_WORK_LIMIT            32
+#define I40E_AQ_WORK_LIMIT            66 /* max number of VFs + a little */
 #define I40E_MAX_USER_PRIORITY        8
 #define I40E_DEFAULT_MSG_ENABLE       4
 #define I40E_QUEUE_WAIT_RETRY_LIMIT   10
-#define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 9)
+#define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 16)
 
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 /* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG      (1 << 0)
+#define I40E_PRIV_FLAGS_MFP_FLAG       BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG  BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR         BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS      BIT(3)
 #endif
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
 #define I40E_NVM_VERSION_HI_SHIFT  12
 #define I40E_NVM_VERSION_HI_MASK   (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_OEM_VER_BUILD_MASK    0xffff
+#define I40E_OEM_VER_PATCH_MASK    0xff
+#define I40E_OEM_VER_BUILD_SHIFT   8
+#define I40E_OEM_VER_SHIFT         24
 
 /* The values in here are decimal coded as hex as is the case in the NVM map*/
 #define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -193,9 +199,12 @@ struct i40e_lump_tracking {
 #define I40E_FDIR_BUFFER_HEAD_ROOM     32
 #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
 
+#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
+
 enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
        I40E_FD_STAT_SB,
+       I40E_FD_STAT_ATR_TUNNEL,
        I40E_FD_STAT_PF_COUNT
 };
 #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -203,6 +212,8 @@ enum i40e_fd_stat_idx {
                        (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
 #define I40E_FD_SB_STAT_IDX(pf_id)  \
                        (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+                       (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
 
 struct i40e_fdir_filter {
        struct hlist_node fdir_node;
@@ -226,6 +237,47 @@ struct i40e_fdir_filter {
        u32 fd_id;
 };
 
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+
+#define I40E_CLOUD_FIELD_OMAC  0x01
+#define I40E_CLOUD_FIELD_IMAC  0x02
+#define I40E_CLOUD_FIELD_IVLAN 0x04
+#define I40E_CLOUD_FIELD_TEN_ID        0x08
+#define I40E_CLOUD_FIELD_IIP   0x10
+
+#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC
+#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \
+                                            I40E_CLOUD_FIELD_IVLAN)
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
+                                                  I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC (I40E_CLOUD_FIELD_OMAC | \
+                                                  I40E_CLOUD_FIELD_IMAC | \
+                                                  I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
+                                                   I40E_CLOUD_FIELD_IVLAN | \
+                                                   I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_IIP  I40E_CLOUD_FIELD_IIP
+
+struct i40e_cloud_filter {
+       struct hlist_node cloud_node;
+       /* cloud filter input set follows */
+       u8 outer_mac[ETH_ALEN];
+       u8 inner_mac[ETH_ALEN];
+       __be16 inner_vlan;
+       __be32 inner_ip[4];
+       u32 tenant_id;
+       u8 flags;
+#define I40E_CLOUD_TNL_TYPE_XVLAN    1
+       u8 tunnel_type;
+       /* filter control */
+       u16 vsi_id; /* vsi number */
+       u16 queue_id;
+       u32 id;
+};
+
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+
 #define I40E_ETH_P_LLDP                        0x88cc
 
 #define I40E_DCB_PRIO_TYPE_STRICT      0
@@ -251,7 +303,6 @@ struct i40e_pf {
        struct pci_dev *pdev;
        struct i40e_hw hw;
        unsigned long state;
-       unsigned long link_check_timeout;
        struct msix_entry *msix_entries;
        bool fc_autoneg_status;
 
@@ -277,8 +328,6 @@ struct i40e_pf {
 
        struct hlist_head fdir_filter_list;
        u16 fdir_pf_active_filters;
-       u16 fd_sb_cnt_idx;
-       u16 fd_atr_cnt_idx;
        unsigned long fd_flush_timestamp;
        u32 fd_flush_cnt;
        u32 fd_add_err;
@@ -288,6 +337,11 @@ struct i40e_pf {
        __be16  vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
        u16 pending_vxlan_bitmap;
 
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+       struct hlist_head cloud_filter_list;
+       u16 num_cloud_filters;
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+
        enum i40e_interrupt_policy int_policy;
        u16 rx_itr_default;
        u16 tx_itr_default;
@@ -300,36 +354,39 @@ struct i40e_pf {
        struct work_struct service_task;
 
        u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED              (u64)(1 << 1)
-#define I40E_FLAG_MSI_ENABLED                  (u64)(1 << 2)
-#define I40E_FLAG_MSIX_ENABLED                 (u64)(1 << 3)
-#define I40E_FLAG_RX_1BUF_ENABLED              (u64)(1 << 4)
-#define I40E_FLAG_RX_PS_ENABLED                (u64)(1 << 5)
-#define I40E_FLAG_RSS_ENABLED                  (u64)(1 << 6)
-#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 8)
-#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 9)
+#define I40E_FLAG_RX_CSUM_ENABLED              BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED                  BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED                 BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED              BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED                BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED                  BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED                 BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT         BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE             BIT_ULL(9)
 #ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED                 (u64)(1 << 11)
+#define I40E_FLAG_FCOE_ENABLED                 BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 13)
-#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 14)
-#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 15)
-#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 18)
-#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 19)
-#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 20)
-#define I40E_FLAG_FD_SB_ENABLED                (u64)(1 << 21)
-#define I40E_FLAG_FD_ATR_ENABLED               (u64)(1 << 22)
+#define I40E_FLAG_IN_NETPOLL                   BIT_ULL(12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ                 BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC                  BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT            BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT           BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED                BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED                  BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED                BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED               BIT_ULL(22)
 #ifdef HAVE_PTP_1588_CLOCK
-#define I40E_FLAG_PTP                          (u64)(1 << 25)
+#define I40E_FLAG_PTP                          BIT_ULL(25)
 #endif /* HAVE_PTP_1588_CLOCK */
-#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 26)
-#define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
-#define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
-#define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
-#define I40E_FLAG_VEB_MODE_ENABLED             (1ULL << 40)
+#define I40E_FLAG_MFP_ENABLED                  BIT_ULL(26)
+#define I40E_FLAG_VXLAN_FILTER_SYNC            BIT_ULL(27)
+#define I40E_FLAG_PORT_ID_VALID                BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE                  BIT_ULL(29)
+#define I40E_FLAG_VEB_STATS_ENABLED            BIT_ULL(37)
+#define I40E_FLAG_LINK_POLLING_ENABLED         BIT_ULL(39)
+#define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
+#define I40E_FLAG_NO_PCI_LINK_CHECK            BIT_ULL(41)
 
        /* tracks features that get auto disabled by errors */
        u64 auto_disable_flags;
@@ -423,9 +480,11 @@ struct i40e_pf {
        u64 rx_ip4_cso_err;
 #endif
        u16 rss_table_size;
-       /* These are only valid in NPAR modes */
-       u32 npar_max_bw;
-       u32 npar_min_bw;
+       u32 max_bw;
+       u32 min_bw;
+
+       u32 ioremap_len;
+       u32 fd_inv;
 };
 
 struct i40e_mac_filter {
@@ -477,10 +536,12 @@ struct i40e_vsi {
 
        u32 current_netdev_flags;
        unsigned long state;
-#define I40E_VSI_FLAG_FILTER_CHANGED  (1<<0)
-#define I40E_VSI_FLAG_VEB_OWNER       (1<<1)
+#define I40E_VSI_FLAG_FILTER_CHANGED   BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER                BIT(1)
        unsigned long flags;
 
+       /* Per VSI lock to protect elements/list (MAC filter) */
+       spinlock_t mac_filter_list_lock;
        struct list_head mac_filter_list;
 
        /* VSI stats */
@@ -500,6 +561,7 @@ struct i40e_vsi {
 #endif
        u32 tx_restart;
        u32 tx_busy;
+       u64 tx_linearize;
        u32 rx_buf_failed;
        u32 rx_page_failed;
 
@@ -515,6 +577,7 @@ struct i40e_vsi {
         */
        u16 rx_itr_setting;
        u16 tx_itr_setting;
+       u16 int_rate_limit;  /* value in usecs */
 
        u16 rss_table_size;
        u16 rss_size;
@@ -560,6 +623,7 @@ struct i40e_vsi {
        u16 idx;               /* index in pf->vsi[] */
        u16 veb_idx;           /* index of VEB parent */
        struct kobject *kobj;  /* sysfs object */
+       bool current_isup;     /* Sync 'link up' logging */
        bool block_tx_timeout;
 
        /* VSI specific handlers */
@@ -594,6 +658,8 @@ struct i40e_q_vector {
 #endif
        struct rcu_head rcu;    /* to avoid race with update stats on free */
        char name[I40E_INT_NAME_STR_LEN];
+#define ITR_COUNTDOWN_START 100
+       u8 itr_countdown;       /* when 0 should adjust ITR */
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
@@ -603,22 +669,29 @@ struct i40e_device {
 };
 
 /**
- * i40e_fw_version_str - format the FW and NVM version strings
+ * i40e_nvm_version_str - format the NVM version strings
  * @hw: ptr to the hardware info
  **/
-static inline char *i40e_fw_version_str(struct i40e_hw *hw)
+static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
 {
        static char buf[32];
+       u32 full_ver;
+       u8 ver, patch;
+       u16 build;
+
+       full_ver = hw->nvm.oem_ver;
+       ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+       build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT)
+                & I40E_OEM_VER_BUILD_MASK);
+       patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
 
        snprintf(buf, sizeof(buf),
-                "f%d.%d.%05d a%d.%d n%x.%02x e%x",
-                hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
-                hw->aq.api_maj_ver, hw->aq.api_min_ver,
+                "%x.%02x 0x%x %d.%d.%d",
                 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
                        I40E_NVM_VERSION_HI_SHIFT,
                 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
                        I40E_NVM_VERSION_LO_SHIFT,
-                (hw->nvm.eetrack & 0xffffff));
+                hw->nvm.eetrack, ver, build, patch);
 
        return buf;
 }
@@ -674,6 +747,7 @@ extern char i40e_driver_name[];
 extern const char i40e_driver_version_str[];
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
 void i40e_update_stats(struct i40e_vsi *vsi);
 void i40e_update_eth_stats(struct i40e_vsi *vsi);
 #ifdef HAVE_NDO_GET_STATS64
@@ -703,7 +777,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        bool is_vf, bool is_netdev);
 void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
                     bool is_vf, bool is_netdev);
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                                u16 uplink, u32 param1);
 int i40e_vsi_release(struct i40e_vsi *vsi);
@@ -732,6 +806,12 @@ void i40e_veb_release(struct i40e_veb *veb);
 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
 i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+int i40e_add_del_cloud_filter(struct i40e_pf *pf,
+                                        struct i40e_cloud_filter *filter,
+                                        struct i40e_vsi *vsi,
+                                        bool add);
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
 void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
 void i40e_pf_reset_stats(struct i40e_pf *pf);
 #ifdef CONFIG_DEBUG_FS
@@ -745,7 +825,24 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
 static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
-void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: enable a particular Hw Interrupt vector, without base_vector
+ **/
+static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+             (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+       wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val);
+       /* skip the flush */
+}
+
 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
@@ -799,7 +896,7 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
 u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
 void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
 void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
-int i40e_init_pf_fcoe(struct i40e_pf *pf);
+void i40e_init_pf_fcoe(struct i40e_pf *pf);
 int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
 void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
 int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
@@ -837,7 +934,8 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
 int i40e_configfs_init(void);
 void i40e_configfs_exit(void);
 #endif /* CONFIG_CONFIGFS_FS */
-i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
 #endif /* _I40E_H_ */
similarity index 95%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_adminq.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq.c
index d7de7661b9f12c441d8f5b48f12d8569e36b2117..58e7ed50e445828bd5c5994bcad52abb60dc567f 100644 (file)
@@ -480,8 +480,12 @@ i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
 {
        i40e_status ret_code = I40E_SUCCESS;
 
-       if (hw->aq.asq.count == 0)
-               return I40E_ERR_NOT_READY;
+       i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+       if (hw->aq.asq.count == 0) {
+               ret_code = I40E_ERR_NOT_READY;
+               goto shutdown_asq_out;
+       }
 
        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
@@ -490,16 +494,13 @@ i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);
 
-       /* make sure spinlock is available */
-       i40e_acquire_spinlock(&hw->aq.asq_spinlock);
-
        hw->aq.asq.count = 0; /* to indicate uninitialized queue */
 
        /* free ring buffers */
        i40e_free_asq_bufs(hw);
 
+shutdown_asq_out:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
-
        return ret_code;
 }
 
@@ -513,8 +514,12 @@ i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
 {
        i40e_status ret_code = I40E_SUCCESS;
 
-       if (hw->aq.arq.count == 0)
-               return I40E_ERR_NOT_READY;
+       i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+       if (hw->aq.arq.count == 0) {
+               ret_code = I40E_ERR_NOT_READY;
+               goto shutdown_arq_out;
+       }
 
        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
@@ -523,16 +528,13 @@ i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);
 
-       /* make sure spinlock is available */
-       i40e_acquire_spinlock(&hw->aq.arq_spinlock);
-
        hw->aq.arq.count = 0; /* to indicate uninitialized queue */
 
        /* free ring buffers */
        i40e_free_arq_bufs(hw);
 
+shutdown_arq_out:
        i40e_release_spinlock(&hw->aq.arq_spinlock);
-
        return ret_code;
 }
 
@@ -551,6 +553,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 {
        i40e_status ret_code;
        u16 eetrack_lo, eetrack_hi;
+       u16 cfg_ptr, oem_hi, oem_lo;
        int retry = 0;
        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
@@ -608,6 +611,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+       i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+       i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+                          &oem_hi);
+       i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+                          &oem_lo);
+       hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
 
        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
@@ -658,6 +667,9 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);
 
+       if (hw->nvm_buff.va)
+               i40e_free_virt_mem(hw, &hw->nvm_buff);
+
        return ret_code;
 }
 
@@ -677,16 +689,16 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
 
        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
+
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-                          "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
-                          rd32(hw, hw->aq.asq.head));
+                          "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
 
                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
-                       i40e_memcpy(&desc_cb, desc,
-                                   sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
+                       i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
+                                   I40E_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
@@ -744,19 +756,23 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
        u16  retval = 0;
        u32  val = 0;
 
-       val = rd32(hw, hw->aq.asq.head);
-       if (val >= hw->aq.num_asq_entries) {
+       i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+       hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+       if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-                          "AQTX: head overrun at %d\n", val);
+                          "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
-               goto asq_send_command_exit;
+               goto asq_send_command_error;
        }
 
-       if (hw->aq.asq.count == 0) {
+       val = rd32(hw, hw->aq.asq.head);
+       if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-                          "AQTX: Admin queue not initialized.\n");
+                          "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
-               goto asq_send_command_exit;
+               goto asq_send_command_error;
        }
 
        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
@@ -786,8 +802,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);
 
-       i40e_acquire_spinlock(&hw->aq.asq_spinlock);
-
        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
@@ -901,6 +915,11 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
 
+       /* save writeback aq if requested */
+       if (details->wb_desc)
+               i40e_memcpy(details->wb_desc, desc_on_ring,
+                           sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
+
        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
@@ -912,7 +931,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 
 asq_send_command_error:
        i40e_release_spinlock(&hw->aq.asq_spinlock);
-asq_send_command_exit:
        return status;
 }
 
@@ -1029,6 +1047,19 @@ clean_arq_element_out:
                        i40e_release_nvm(hw);
                        hw->aq.nvm_release_on_done = false;
                }
+
+               switch (hw->nvmupd_state) {
+               case I40E_NVMUPD_STATE_INIT_WAIT:
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+                       break;
+
+               case I40E_NVMUPD_STATE_WRITE_WAIT:
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+                       break;
+
+               default:
+                       break;
+               }
        }
 
        return ret_code;
@@ -1040,9 +1071,6 @@ void i40e_resume_aq(struct i40e_hw *hw)
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
 
-#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
-#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
-#endif
        i40e_config_asq_regs(hw);
 
        hw->aq.arq.next_to_use = 0;
similarity index 94%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_adminq.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq.h
index f6e4455483e0a859d414ab092fc1b65e39888d07..3a80aa0168ff51e906029ee98de36a787a0d08a0 100644 (file)
@@ -69,6 +69,7 @@ struct i40e_asq_cmd_details {
        u16 flags_dis;
        bool async;
        bool postpone;
+       struct i40e_aq_desc *wb_desc;
 };
 
 #define I40E_ADMINQ_DETAILS(R, i)   \
@@ -108,9 +109,10 @@ struct i40e_adminq_info {
 
 /**
  * i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_rc: AdminQ error code to convert
+ * aq_ret: AdminQ handler error code can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
  **/
-static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
+static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
 {
        int aq_to_posix[] = {
                0,           /* I40E_AQ_RC_OK */
@@ -142,8 +144,9 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
        if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                return -EAGAIN;
 
-       if (aq_rc >= (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))
+       if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
                return -ERANGE;
+
        return aq_to_posix[aq_rc];
 }
 
similarity index 96%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_adminq_cmd.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq_cmd.h
index d7b5a2d52aca815d24708d34da1b110c4fc68853..aa4a8b2f057157cad8923022f13d333889482f1c 100644 (file)
@@ -34,7 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR      0x0001
-#define I40E_FW_API_VERSION_MINOR      0x0002
+#define I40E_FW_API_VERSION_MINOR      0x0004
 
 struct i40e_aq_desc {
        __le16 flags;
@@ -132,12 +132,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_list_func_capabilities     = 0x000A,
        i40e_aqc_opc_list_dev_capabilities      = 0x000B,
 
-       i40e_aqc_opc_set_cppm_configuration     = 0x0103,
-       i40e_aqc_opc_set_arp_proxy_entry        = 0x0104,
-       i40e_aqc_opc_set_ns_proxy_entry         = 0x0105,
-
        /* LAA */
-       i40e_aqc_opc_mng_laa            = 0x0106,   /* AQ obsolete */
        i40e_aqc_opc_mac_address_read   = 0x0107,
        i40e_aqc_opc_mac_address_write  = 0x0108,
 
@@ -232,6 +227,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_nvm_update                 = 0x0703,
        i40e_aqc_opc_nvm_config_read            = 0x0704,
        i40e_aqc_opc_nvm_config_write           = 0x0705,
+       i40e_aqc_opc_oem_post_update            = 0x0720,
 
        /* virtualization commands */
        i40e_aqc_opc_send_msg_to_pf             = 0x0801,
@@ -262,7 +258,6 @@ enum i40e_admin_queue_opc {
        /* Tunnel commands */
        i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
        i40e_aqc_opc_del_udp_tunnel     = 0x0B01,
-       i40e_aqc_opc_tunnel_key_structure       = 0x0B10,
 
        /* Async Events */
        i40e_aqc_opc_event_lan_overflow         = 0x1001,
@@ -274,8 +269,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_oem_ocbb_initialize        = 0xFE03,
 
        /* debug commands */
-       i40e_aqc_opc_debug_get_deviceid         = 0xFF00,
-       i40e_aqc_opc_debug_set_mode             = 0xFF01,
        i40e_aqc_opc_debug_read_reg             = 0xFF03,
        i40e_aqc_opc_debug_write_reg            = 0xFF04,
        i40e_aqc_opc_debug_modify_reg           = 0xFF07,
@@ -509,7 +502,8 @@ struct i40e_aqc_mac_address_read {
 #define I40E_AQC_SAN_ADDR_VALID                0x20
 #define I40E_AQC_PORT_ADDR_VALID       0x40
 #define I40E_AQC_WOL_ADDR_VALID                0x80
-#define I40E_AQC_ADDR_VALID_MASK       0xf0
+#define I40E_AQC_MC_MAG_EN_VALID       0x100
+#define I40E_AQC_ADDR_VALID_MASK       0x1F0
        u8      reserved[6];
        __le32  addr_high;
        __le32  addr_low;
@@ -532,7 +526,9 @@ struct i40e_aqc_mac_address_write {
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY   0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL    0x4000
 #define I40E_AQC_WRITE_TYPE_PORT       0x8000
-#define I40E_AQC_WRITE_TYPE_MASK       0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG      0xC000
+#define I40E_AQC_WRITE_TYPE_MASK       0xC000
+
        __le16  mac_sah;
        __le32  mac_sal;
        u8      reserved[8];
@@ -1068,6 +1064,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
        __le16  seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK                0x3FF
        __le16  vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK             0x0FFF
 #define I40E_AQC_SET_VSI_VLAN_VALID            0x8000
        u8      reserved[8];
 };
@@ -1717,11 +1714,13 @@ struct i40e_aqc_get_link_status {
        u8      phy_type;    /* i40e_aq_phy_type   */
        u8      link_speed;  /* i40e_aq_link_speed */
        u8      link_info;
-#define I40E_AQ_LINK_UP                        0x01
+#define I40E_AQ_LINK_UP                        0x01    /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION       0x01
 #define I40E_AQ_LINK_FAULT             0x02
 #define I40E_AQ_LINK_FAULT_TX          0x04
 #define I40E_AQ_LINK_FAULT_RX          0x08
 #define I40E_AQ_LINK_FAULT_REMOTE      0x10
+#define I40E_AQ_LINK_UP_PORT           0x20
 #define I40E_AQ_MEDIA_AVAILABLE                0x40
 #define I40E_AQ_SIGNAL_DETECT          0x80
        u8      an_info;
@@ -1883,6 +1882,26 @@ struct i40e_aqc_nvm_config_data_immediate_field {
 
 I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
 
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+ struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA      0x01
+       u8 sel_data;
+       u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+       u8 str_len;
+       u8 dev_addr;
+       __le16 eeprom_addr;
+       u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
@@ -2056,12 +2075,28 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
 #define I40E_AQC_CEE_APP_ISCSI_MASK    (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
 #define I40E_AQC_CEE_APP_FIP_SHIFT     0x8
 #define I40E_AQC_CEE_APP_FIP_MASK      (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
 #define I40E_AQC_CEE_PG_STATUS_SHIFT   0x0
 #define I40E_AQC_CEE_PG_STATUS_MASK    (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
 #define I40E_AQC_CEE_PFC_STATUS_SHIFT  0x3
 #define I40E_AQC_CEE_PFC_STATUS_MASK   (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
 #define I40E_AQC_CEE_APP_STATUS_SHIFT  0x8
 #define I40E_AQC_CEE_APP_STATUS_MASK   (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK  (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT        0xB
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT  0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK   (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct.  Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
 struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
        u8      reserved1;
        u8      oper_num_tc;
@@ -2069,9 +2104,9 @@ struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
        u8      reserved2;
        u8      oper_tc_bw[8];
        u8      oper_pfc_en;
-       u8      reserved3;
+       u8      reserved3[2];
        __le16  oper_app_prio;
-       u8      reserved4;
+       u8      reserved4[2];
        __le16  tlv_status;
 };
 
@@ -2094,7 +2129,13 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
  */
 struct i40e_aqc_lldp_set_local_mib {
 #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT       0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK                (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK        (1 << \
+                                       SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB        0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT   (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK    (1 << \
+                               SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS         0x1
        u8      type;
        u8      reserved0;
        __le16  length;
similarity index 91%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_common.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_common.c
index d59e344e0b2a8f681adab3af5ad8268be6b42f54..2aded6bbd51a63d8a18d6e4636a539b894c56b68 100644 (file)
@@ -51,7 +51,9 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_QSFP_B:
                case I40E_DEV_ID_QSFP_C:
                case I40E_DEV_ID_10G_BASE_T:
+               case I40E_DEV_ID_10G_BASE_T4:
                case I40E_DEV_ID_20G_KR2:
+               case I40E_DEV_ID_20G_KR2_A:
                        hw->mac.type = I40E_MAC_XL710;
                        break;
                case I40E_DEV_ID_VF:
@@ -71,6 +73,212 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
        return status;
 }
 
+/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+       switch (aq_err) {
+       case I40E_AQ_RC_OK:
+               return "OK";
+       case I40E_AQ_RC_EPERM:
+               return "I40E_AQ_RC_EPERM";
+       case I40E_AQ_RC_ENOENT:
+               return "I40E_AQ_RC_ENOENT";
+       case I40E_AQ_RC_ESRCH:
+               return "I40E_AQ_RC_ESRCH";
+       case I40E_AQ_RC_EINTR:
+               return "I40E_AQ_RC_EINTR";
+       case I40E_AQ_RC_EIO:
+               return "I40E_AQ_RC_EIO";
+       case I40E_AQ_RC_ENXIO:
+               return "I40E_AQ_RC_ENXIO";
+       case I40E_AQ_RC_E2BIG:
+               return "I40E_AQ_RC_E2BIG";
+       case I40E_AQ_RC_EAGAIN:
+               return "I40E_AQ_RC_EAGAIN";
+       case I40E_AQ_RC_ENOMEM:
+               return "I40E_AQ_RC_ENOMEM";
+       case I40E_AQ_RC_EACCES:
+               return "I40E_AQ_RC_EACCES";
+       case I40E_AQ_RC_EFAULT:
+               return "I40E_AQ_RC_EFAULT";
+       case I40E_AQ_RC_EBUSY:
+               return "I40E_AQ_RC_EBUSY";
+       case I40E_AQ_RC_EEXIST:
+               return "I40E_AQ_RC_EEXIST";
+       case I40E_AQ_RC_EINVAL:
+               return "I40E_AQ_RC_EINVAL";
+       case I40E_AQ_RC_ENOTTY:
+               return "I40E_AQ_RC_ENOTTY";
+       case I40E_AQ_RC_ENOSPC:
+               return "I40E_AQ_RC_ENOSPC";
+       case I40E_AQ_RC_ENOSYS:
+               return "I40E_AQ_RC_ENOSYS";
+       case I40E_AQ_RC_ERANGE:
+               return "I40E_AQ_RC_ERANGE";
+       case I40E_AQ_RC_EFLUSHED:
+               return "I40E_AQ_RC_EFLUSHED";
+       case I40E_AQ_RC_BAD_ADDR:
+               return "I40E_AQ_RC_BAD_ADDR";
+       case I40E_AQ_RC_EMODE:
+               return "I40E_AQ_RC_EMODE";
+       case I40E_AQ_RC_EFBIG:
+               return "I40E_AQ_RC_EFBIG";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+       return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+       switch (stat_err) {
+       case I40E_SUCCESS:
+               return "OK";
+       case I40E_ERR_NVM:
+               return "I40E_ERR_NVM";
+       case I40E_ERR_NVM_CHECKSUM:
+               return "I40E_ERR_NVM_CHECKSUM";
+       case I40E_ERR_PHY:
+               return "I40E_ERR_PHY";
+       case I40E_ERR_CONFIG:
+               return "I40E_ERR_CONFIG";
+       case I40E_ERR_PARAM:
+               return "I40E_ERR_PARAM";
+       case I40E_ERR_MAC_TYPE:
+               return "I40E_ERR_MAC_TYPE";
+       case I40E_ERR_UNKNOWN_PHY:
+               return "I40E_ERR_UNKNOWN_PHY";
+       case I40E_ERR_LINK_SETUP:
+               return "I40E_ERR_LINK_SETUP";
+       case I40E_ERR_ADAPTER_STOPPED:
+               return "I40E_ERR_ADAPTER_STOPPED";
+       case I40E_ERR_INVALID_MAC_ADDR:
+               return "I40E_ERR_INVALID_MAC_ADDR";
+       case I40E_ERR_DEVICE_NOT_SUPPORTED:
+               return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+       case I40E_ERR_MASTER_REQUESTS_PENDING:
+               return "I40E_ERR_MASTER_REQUESTS_PENDING";
+       case I40E_ERR_INVALID_LINK_SETTINGS:
+               return "I40E_ERR_INVALID_LINK_SETTINGS";
+       case I40E_ERR_AUTONEG_NOT_COMPLETE:
+               return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+       case I40E_ERR_RESET_FAILED:
+               return "I40E_ERR_RESET_FAILED";
+       case I40E_ERR_SWFW_SYNC:
+               return "I40E_ERR_SWFW_SYNC";
+       case I40E_ERR_NO_AVAILABLE_VSI:
+               return "I40E_ERR_NO_AVAILABLE_VSI";
+       case I40E_ERR_NO_MEMORY:
+               return "I40E_ERR_NO_MEMORY";
+       case I40E_ERR_BAD_PTR:
+               return "I40E_ERR_BAD_PTR";
+       case I40E_ERR_RING_FULL:
+               return "I40E_ERR_RING_FULL";
+       case I40E_ERR_INVALID_PD_ID:
+               return "I40E_ERR_INVALID_PD_ID";
+       case I40E_ERR_INVALID_QP_ID:
+               return "I40E_ERR_INVALID_QP_ID";
+       case I40E_ERR_INVALID_CQ_ID:
+               return "I40E_ERR_INVALID_CQ_ID";
+       case I40E_ERR_INVALID_CEQ_ID:
+               return "I40E_ERR_INVALID_CEQ_ID";
+       case I40E_ERR_INVALID_AEQ_ID:
+               return "I40E_ERR_INVALID_AEQ_ID";
+       case I40E_ERR_INVALID_SIZE:
+               return "I40E_ERR_INVALID_SIZE";
+       case I40E_ERR_INVALID_ARP_INDEX:
+               return "I40E_ERR_INVALID_ARP_INDEX";
+       case I40E_ERR_INVALID_FPM_FUNC_ID:
+               return "I40E_ERR_INVALID_FPM_FUNC_ID";
+       case I40E_ERR_QP_INVALID_MSG_SIZE:
+               return "I40E_ERR_QP_INVALID_MSG_SIZE";
+       case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+               return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+       case I40E_ERR_INVALID_FRAG_COUNT:
+               return "I40E_ERR_INVALID_FRAG_COUNT";
+       case I40E_ERR_QUEUE_EMPTY:
+               return "I40E_ERR_QUEUE_EMPTY";
+       case I40E_ERR_INVALID_ALIGNMENT:
+               return "I40E_ERR_INVALID_ALIGNMENT";
+       case I40E_ERR_FLUSHED_QUEUE:
+               return "I40E_ERR_FLUSHED_QUEUE";
+       case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+               return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+       case I40E_ERR_INVALID_IMM_DATA_SIZE:
+               return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+       case I40E_ERR_TIMEOUT:
+               return "I40E_ERR_TIMEOUT";
+       case I40E_ERR_OPCODE_MISMATCH:
+               return "I40E_ERR_OPCODE_MISMATCH";
+       case I40E_ERR_CQP_COMPL_ERROR:
+               return "I40E_ERR_CQP_COMPL_ERROR";
+       case I40E_ERR_INVALID_VF_ID:
+               return "I40E_ERR_INVALID_VF_ID";
+       case I40E_ERR_INVALID_HMCFN_ID:
+               return "I40E_ERR_INVALID_HMCFN_ID";
+       case I40E_ERR_BACKING_PAGE_ERROR:
+               return "I40E_ERR_BACKING_PAGE_ERROR";
+       case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+               return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+       case I40E_ERR_INVALID_PBLE_INDEX:
+               return "I40E_ERR_INVALID_PBLE_INDEX";
+       case I40E_ERR_INVALID_SD_INDEX:
+               return "I40E_ERR_INVALID_SD_INDEX";
+       case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+               return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+       case I40E_ERR_INVALID_SD_TYPE:
+               return "I40E_ERR_INVALID_SD_TYPE";
+       case I40E_ERR_MEMCPY_FAILED:
+               return "I40E_ERR_MEMCPY_FAILED";
+       case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+               return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+       case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+               return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+       case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+               return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+       case I40E_ERR_SRQ_ENABLED:
+               return "I40E_ERR_SRQ_ENABLED";
+       case I40E_ERR_ADMIN_QUEUE_ERROR:
+               return "I40E_ERR_ADMIN_QUEUE_ERROR";
+       case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+               return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+       case I40E_ERR_BUF_TOO_SHORT:
+               return "I40E_ERR_BUF_TOO_SHORT";
+       case I40E_ERR_ADMIN_QUEUE_FULL:
+               return "I40E_ERR_ADMIN_QUEUE_FULL";
+       case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+               return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+       case I40E_ERR_BAD_IWARP_CQE:
+               return "I40E_ERR_BAD_IWARP_CQE";
+       case I40E_ERR_NVM_BLANK_MODE:
+               return "I40E_ERR_NVM_BLANK_MODE";
+       case I40E_ERR_NOT_IMPLEMENTED:
+               return "I40E_ERR_NOT_IMPLEMENTED";
+       case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+               return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+       case I40E_ERR_DIAG_TEST_FAILED:
+               return "I40E_ERR_DIAG_TEST_FAILED";
+       case I40E_ERR_NOT_READY:
+               return "I40E_ERR_NOT_READY";
+       case I40E_NOT_SUPPORTED:
+               return "I40E_NOT_SUPPORTED";
+       case I40E_ERR_FIRMWARE_API_VERSION:
+               return "I40E_ERR_FIRMWARE_API_VERSION";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+       return hw->err_str;
+}
+
 /**
  * i40e_debug_aq
  * @hw: debug mask related to admin queue
@@ -143,9 +351,9 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 bool i40e_check_asq_alive(struct i40e_hw *hw)
 {
        if (hw->aq.asq.len)
-               return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
-       else
-               return false;
+               return !!(rd32(hw, hw->aq.asq.len) &
+                       I40E_PF_ATQLEN_ATQENABLE_MASK);
+       return false;
 }
 
 /**
@@ -878,7 +1086,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
        return media;
 }
 
-#define I40E_PF_RESET_WAIT_COUNT       110
+#define I40E_PF_RESET_WAIT_COUNT       200
 /**
  * i40e_pf_reset - Reset the PF
  * @hw: pointer to the hardware structure
@@ -900,7 +1108,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
        grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
                        I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
                        I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
-       for (cnt = 0; cnt < grst_del + 2; cnt++) {
+       for (cnt = 0; cnt < grst_del + 10; cnt++) {
                reg = rd32(hw, I40E_GLGEN_RSTAT);
                if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
                        break;
@@ -1119,9 +1327,11 @@ u32 i40e_led_get(struct i40e_hw *hw)
                if (!gpio_val)
                        continue;
 
-               /* ignore gpio LED src mode entries related to the activity LEDs */
-               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
-                       I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+               /* ignore gpio LED src mode entries related to the activity
+                *  LEDs
+                */
+               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+                               >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
                switch (current_mode) {
                case I40E_COMBINED_ACTIVITY:
                case I40E_FILTER_ACTIVITY:
@@ -1165,9 +1375,11 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
                if (!gpio_val)
                        continue;
 
-               /* ignore gpio LED src mode entries related to the activity LEDs */
-               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
-                       I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+               /* ignore gpio LED src mode entries related to the activity
+                * LEDs
+                */
+               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+                               >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
                switch (current_mode) {
                case I40E_COMBINED_ACTIVITY:
                case I40E_FILTER_ACTIVITY:
@@ -1186,9 +1398,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
                        blink = false;
 
                if (blink)
-                       gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+                       gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
                else
-                       gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+                       gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 
                wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
                break;
@@ -1240,6 +1452,9 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
        if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
                status = I40E_ERR_UNKNOWN_PHY;
 
+       if (report_init)
+               hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+
        return status;
 }
 
@@ -1340,14 +1555,14 @@ i40e_status i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
                        *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
        }
        /* Update the link info */
-       status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+       status = i40e_update_link_info(hw);
        if (status) {
                /* Wait a little bit (on 40G cards it sometimes takes a really
                 * long time for link to come back from the atomic reset)
                 * and try once more
                 */
                msleep(1000);
-               status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+               status = i40e_update_link_info(hw);
        }
        if (status)
                *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -1536,7 +1751,6 @@ aq_get_link_info_exit:
        return status;
 }
 
-
 /**
  * i40e_aq_set_phy_int_mask
  * @hw: pointer to the hw struct
@@ -1847,6 +2061,74 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
        return status;
 }
 
+/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+                               u16 seid, bool enable, u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+       i40e_status status;
+       u16 flags = 0;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+       if (enable)
+               flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+       cmd->promiscuous_flags = CPU_TO_LE16(flags);
+       cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+       cmd->seid = CPU_TO_LE16(seid);
+       cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+                               u16 seid, bool enable, u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+       i40e_status status;
+       u16 flags = 0;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+       if (enable)
+               flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+       cmd->promiscuous_flags = CPU_TO_LE16(flags);
+       cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+       cmd->seid = CPU_TO_LE16(seid);
+       cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
 /**
  * i40e_aq_set_vsi_broadcast
  * @hw: pointer to the hw struct
@@ -2078,29 +2360,57 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
 /**
  * i40e_get_link_status - get status of the HW network link
  * @hw: pointer to the hw struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
  *
- * Returns true if link is up, false if link is down.
+ * Sets *link_up to true if link is up, false if link is down.
+ * The value of *link_up is invalid if the returned status != I40E_SUCCESS
  *
  * Side effect: LinkStatusEvent reporting becomes enabled
  **/
-bool i40e_get_link_status(struct i40e_hw *hw)
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
 {
        i40e_status status = I40E_SUCCESS;
-       bool link_status = false;
 
        if (hw->phy.get_link_info) {
-               status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+               status = i40e_update_link_info(hw);
 
                if (status != I40E_SUCCESS)
-                       goto i40e_get_link_status_exit;
+                       i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+                                  status);
        }
 
-       link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+       *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
 
-i40e_get_link_status_exit:
-       return link_status;
+       return status;
 }
 
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+i40e_status i40e_update_link_info(struct i40e_hw *hw)
+{
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       i40e_status status = I40E_SUCCESS;
+
+       status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+       if (status)
+               return status;
+
+       if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+               status = i40e_aq_get_phy_capabilities(hw, false, false,
+                                                     &abilities, NULL);
+               if (status)
+                       return status;
+
+               memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+                       sizeof(hw->phy.link_info.module_type));
+       }
+
+       return status;
+}
+
+
 /**
  * i40e_get_link_speed
  * @hw: pointer to the hw struct
@@ -2229,6 +2539,7 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
                *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
        if (floating) {
                u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
+
                if (flags & I40E_AQC_ADD_VEB_FLOATING)
                        *floating = true;
                else
@@ -2754,6 +3065,27 @@ i40e_status i40e_aq_write_nvm_config(struct i40e_hw *hw,
        return status;
 }
 
+/**
+ * i40e_aq_oem_post_update - triggers an OEM specific flow after update
+ * @hw: pointer to the hw struct
+ * @buff: buffer with OEM data (currently unused; command is sent without a buffer)
+ * @buff_size: size of the buffer in bytes (currently unused)
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_oem_post_update(struct i40e_hw *hw,
+                               void *buff, u16 buff_size,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+       if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
+               status = I40E_ERR_NOT_IMPLEMENTED;
+
+       return status;
+}
+
 /**
  * i40e_aq_erase_nvm
  * @hw: pointer to the hw struct
@@ -2816,12 +3148,13 @@ i40e_aq_erase_nvm_exit:
 #define I40E_DEV_FUNC_CAP_MSIX_VF      0x44
 #define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR        0x45
 #define I40E_DEV_FUNC_CAP_IEEE_1588    0x46
-#define I40E_DEV_FUNC_CAP_MFP_MODE_1   0xF1
+#define I40E_DEV_FUNC_CAP_FLEX10       0xF1
 #define I40E_DEV_FUNC_CAP_CEM          0xF2
 #define I40E_DEV_FUNC_CAP_IWARP                0x51
 #define I40E_DEV_FUNC_CAP_LED          0x61
 #define I40E_DEV_FUNC_CAP_SDP          0x62
 #define I40E_DEV_FUNC_CAP_MDIO         0x63
+#define I40E_DEV_FUNC_CAP_WR_CSR_PROT  0x64
 
 /**
  * i40e_parse_discover_capabilities
@@ -2840,6 +3173,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
        u32 valid_functions, num_functions;
        u32 number, logical_id, phys_id;
        struct i40e_hw_capabilities *p;
+       u8 major_rev;
        u32 i = 0;
        u16 id;
 
@@ -2857,6 +3191,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                number = LE32_TO_CPU(cap->number);
                logical_id = LE32_TO_CPU(cap->logical_id);
                phys_id = LE32_TO_CPU(cap->phys_id);
+               major_rev = cap->major_rev;
 
                switch (id) {
                case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2931,9 +3266,21 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                case I40E_DEV_FUNC_CAP_MSIX_VF:
                        p->num_msix_vectors_vf = number;
                        break;
-               case I40E_DEV_FUNC_CAP_MFP_MODE_1:
-                       if (number == 1)
-                               p->mfp_mode_1 = true;
+               case I40E_DEV_FUNC_CAP_FLEX10:
+                       if (major_rev == 1) {
+                               if (number == 1) {
+                                       p->flex10_enable = true;
+                                       p->flex10_capable = true;
+                               }
+                       } else {
+                               /* Capability revision >= 2 */
+                               if (number & 1)
+                                       p->flex10_enable = true;
+                               if (number & 2)
+                                       p->flex10_capable = true;
+                       }
+                       p->flex10_mode = logical_id;
+                       p->flex10_status = phys_id;
                        break;
                case I40E_DEV_FUNC_CAP_CEM:
                        if (number == 1)
@@ -2966,16 +3313,23 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                        p->fd_filters_guaranteed = number;
                        p->fd_filters_best_effort = logical_id;
                        break;
+               case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+                       p->wr_csr_prot = (u64)number;
+                       p->wr_csr_prot |= (u64)logical_id << 32;
+                       break;
                default:
                        break;
                }
        }
 
+       if (p->fcoe)
+               i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
 #ifdef I40E_FCOE_ENA
        /* Software override ensuring FCoE is disabled if npar or mfp
         * mode because it is not supported in these modes.
         */
-       if (p->npar_enable || p->mfp_mode_1)
+       if (p->npar_enable || p->flex10_enable)
                p->fcoe = false;
 #else
        /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
@@ -4588,6 +4942,28 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
        return status;
 }
 
+/**
+ * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid to add ethertype filter from
+ **/
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+                                                   u16 seid)
+{
+       u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+                  I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+                  I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+       u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+       i40e_status status;
+
+       status = i40e_aq_add_rem_control_packet_filter(hw, 0, ethtype, flag,
+                                                      seid, 0, true, NULL,
+                                                      NULL);
+       if (status)
+               hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
+
 /**
  * i40e_aq_add_cloud_filters
  * @hw: pointer to the hardware structure
@@ -4727,7 +5103,7 @@ i40e_status i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
 
        cmd_resp->address = CPU_TO_LE32(addr);
        cmd_resp->length = CPU_TO_LE32(dw_count);
-       cmd_resp->addr_high = CPU_TO_LE32(high_16_bits((u64)buffer));
+       cmd_resp->addr_high = CPU_TO_LE32(upper_32_bits((u64)buffer));
        cmd_resp->addr_low = CPU_TO_LE32(lower_32_bits((u64)buffer));
 
        status = i40e_asq_send_command(hw, &desc, buffer,
@@ -4965,6 +5341,63 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
        }
 }
 
+/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+                               u8 table_id, u32 start_index, u16 buff_size,
+                               void *buff, u16 *ret_buff_size,
+                               u8 *ret_next_table, u32 *ret_next_index,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_debug_dump_internals *cmd =
+               (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+       struct i40e_aqc_debug_dump_internals *resp =
+               (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+       i40e_status status;
+
+       if (buff_size == 0 || !buff)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_debug_dump_internals);
+       /* Indirect Command */
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       if (buff_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       cmd->cluster_id = cluster_id;
+       cmd->table_id = table_id;
+       cmd->idx = CPU_TO_LE32(start_index);
+
+       desc.datalen = CPU_TO_LE16(buff_size);
+
+       status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+       if (!status) {
+               if (ret_buff_size != NULL)
+                       *ret_buff_size = LE16_TO_CPU(desc.datalen);
+               if (ret_next_table != NULL)
+                       *ret_next_table = resp->table_id;
+               if (ret_next_index != NULL)
+                       *ret_next_index = LE32_TO_CPU(resp->idx);
+       }
+
+       return status;
+}
+
 /**
  * i40e_read_bw_from_alt_ram
  * @hw: pointer to the hardware structure
@@ -4984,11 +5417,11 @@ i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
 
        /* Calculate the address of the min/max bw registers */
        max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
-               I40E_ALT_STRUCT_MAX_BW_OFFSET +
-               (I40E_ALT_STRUCT_DWORDS_PER_PF*hw->pf_id);
+                     I40E_ALT_STRUCT_MAX_BW_OFFSET +
+                     (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
        min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
-               I40E_ALT_STRUCT_MIN_BW_OFFSET +
-               (I40E_ALT_STRUCT_DWORDS_PER_PF*hw->pf_id);
+                     I40E_ALT_STRUCT_MIN_BW_OFFSET +
+                     (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
 
        /* Read the bandwidths from alt ram */
        status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
similarity index 93%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_configfs.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_configfs.c
index edaeeaa117d6091d5be7998d27f8bb8b73b4d836..fb260b64f03354b541c94070aa70a9bc219be07c 100644 (file)
@@ -113,15 +113,15 @@ static ssize_t i40e_cfgfs_vsi_attr_show(struct config_item *item,
        if (strncmp(attr->ca_name, "min_bw", 6) == 0)
                count = sprintf(page, "%s %s %d%%\n",
                                i40e_cfgfs_vsi->vsi->netdev->name,
-                               (pf->npar_min_bw & I40E_ALT_BW_RELATIVE_MASK) ?
+                               (pf->min_bw & I40E_ALT_BW_RELATIVE_MASK) ?
                                "Relative Min BW" : "Absolute Min BW",
-                               pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK);
+                               pf->min_bw & I40E_ALT_BW_VALUE_MASK);
        else if (strncmp(attr->ca_name, "max_bw", 6) == 0)
                count = sprintf(page, "%s %s %d%%\n",
                                i40e_cfgfs_vsi->vsi->netdev->name,
-                               (pf->npar_max_bw & I40E_ALT_BW_RELATIVE_MASK) ?
+                               (pf->max_bw & I40E_ALT_BW_RELATIVE_MASK) ?
                                "Relative Max BW" : "Absolute Max BW",
-                               pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK);
+                               pf->max_bw & I40E_ALT_BW_VALUE_MASK);
        else if (strncmp(attr->ca_name, "ports", 5) == 0)
                count = sprintf(page, "%d\n",
                                pf->hw.num_ports);
@@ -163,28 +163,28 @@ static ssize_t i40e_cfgfs_vsi_attr_store(struct config_item *item,
                return -ERANGE;
 
        if (strncmp(attr->ca_name, "min_bw", 6) == 0) {
-               if (tmp > (pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK))
+               if (tmp > (pf->max_bw & I40E_ALT_BW_VALUE_MASK))
                        return -ERANGE;
                /* Preserve the valid and relative BW bits - the rest is
                 * don't care.
                 */
-               pf->npar_min_bw &= (I40E_ALT_BW_RELATIVE_MASK |
+               pf->min_bw &= (I40E_ALT_BW_RELATIVE_MASK |
                                    I40E_ALT_BW_VALID_MASK);
-               pf->npar_min_bw |= (tmp & I40E_ALT_BW_VALUE_MASK);
-               i40e_set_npar_bw_setting(pf);
+               pf->min_bw |= (tmp & I40E_ALT_BW_VALUE_MASK);
+               i40e_set_partition_bw_setting(pf);
        } else if (strncmp(attr->ca_name, "max_bw", 6) == 0) {
                if (tmp < 1 ||
-                   tmp < (pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK))
+                   tmp < (pf->min_bw & I40E_ALT_BW_VALUE_MASK))
                        return -ERANGE;
                /* Preserve the valid and relative BW bits - the rest is
                 * don't care.
                 */
-               pf->npar_max_bw &= (I40E_ALT_BW_RELATIVE_MASK |
+               pf->max_bw &= (I40E_ALT_BW_RELATIVE_MASK |
                                    I40E_ALT_BW_VALID_MASK);
-               pf->npar_max_bw |= (tmp & I40E_ALT_BW_VALUE_MASK);
-               i40e_set_npar_bw_setting(pf);
+               pf->max_bw |= (tmp & I40E_ALT_BW_VALUE_MASK);
+               i40e_set_partition_bw_setting(pf);
        } else if (strncmp(attr->ca_name, "commit", 6) == 0 && tmp == 1) {
-               if (i40e_commit_npar_bw_setting(pf))
+               if (i40e_commit_partition_bw_setting(pf))
                        return -EIO;
        }
 
similarity index 53%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_dcb.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb.c
index 3b7724eae20f473ad21b0566b54cc6afb631f74a..ac0053ff49efb6715fe29397cf654796a8492543 100644 (file)
@@ -291,6 +291,188 @@ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
        }
 }
 
+/**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+                                    struct i40e_dcbx_config *dcbcfg)
+{
+       struct i40e_dcb_ets_config *etscfg;
+       u8 *buf = tlv->tlvinfo;
+       u16 offset = 0;
+       u8 priority;
+       int i;
+
+       etscfg = &dcbcfg->etscfg;
+
+       if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+               etscfg->willing = 1;
+
+       etscfg->cbs = 0;
+       /* Priority Group Table (4 octets)
+        * Octets:|    1    |    2    |    3    |    4    |
+        *        -----------------------------------------
+        *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+        *        -----------------------------------------
+        *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+        *        -----------------------------------------
+        */
+       for (i = 0; i < 4; i++) {
+               priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+                                I40E_CEE_PGID_PRIO_1_SHIFT);
+               etscfg->prioritytable[i * 2] =  priority;
+               priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+                                I40E_CEE_PGID_PRIO_0_SHIFT);
+               etscfg->prioritytable[i * 2 + 1] = priority;
+               offset++;
+       }
+
+       /* PG Percentage Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               etscfg->tcbwtable[i] = buf[offset++];
+
+       /* Number of TCs supported (1 octet) */
+       etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+                                     struct i40e_dcbx_config *dcbcfg)
+{
+       u8 *buf = tlv->tlvinfo;
+
+       if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+               dcbcfg->pfc.willing = 1;
+
+       /* ------------------------
+        * | PFC Enable | PFC TCs |
+        * ------------------------
+        * | 1 octet    | 1 octet |
+        */
+       dcbcfg->pfc.pfcenable = buf[0];
+       dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+                                  struct i40e_dcbx_config *dcbcfg)
+{
+       u16 length, typelength, offset = 0;
+       struct i40e_cee_app_prio *app;
+       u8 i, up, selector;
+
+       typelength = ntohs(tlv->hdr.typelen);
+       length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+                      I40E_LLDP_TLV_LEN_SHIFT);
+
+       dcbcfg->numapps = length/sizeof(*app);
+       if (!dcbcfg->numapps)
+               return;
+
+       for (i = 0; i < dcbcfg->numapps; i++) {
+               app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+               for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+                       if (app->prio_map & BIT(up))
+                               break;
+               }
+               dcbcfg->app[i].priority = up;
+               /* Get Selector from lower 2 bits, and convert to IEEE */
+               selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+               if (selector == I40E_CEE_APP_SEL_ETHTYPE)
+                       dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+               else if (selector == I40E_CEE_APP_SEL_TCPIP)
+                       dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+               else
+                       /* Keep selector as it is for unknown types */
+                       dcbcfg->app[i].selector = selector;
+               dcbcfg->app[i].protocolid = ntohs(app->protocol);
+               /* Move to next app */
+               offset += sizeof(*app);
+       }
+}
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+                              struct i40e_dcbx_config *dcbcfg)
+{
+       u16 len, tlvlen, sublen, typelength;
+       struct i40e_cee_feat_tlv *sub_tlv;
+       u8 subtype, feat_tlv_count = 0;
+       u32 ouisubtype;
+
+       ouisubtype = ntohl(tlv->ouisubtype);
+       subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+                      I40E_LLDP_TLV_SUBTYPE_SHIFT);
+       /* Return if not CEE DCBX */
+       if (subtype != I40E_CEE_DCBX_TYPE)
+               return;
+
+       typelength = ntohs(tlv->typelength);
+       tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+                       I40E_LLDP_TLV_LEN_SHIFT);
+       len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+             sizeof(struct i40e_cee_ctrl_tlv);
+       /* Return if no CEE DCBX Feature TLVs */
+       if (tlvlen <= len)
+               return;
+
+       sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+       while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+               typelength = ntohs(sub_tlv->hdr.typelen);
+               sublen = (u16)((typelength &
+                               I40E_LLDP_TLV_LEN_MASK) >>
+                               I40E_LLDP_TLV_LEN_SHIFT);
+               subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+                               I40E_LLDP_TLV_TYPE_SHIFT);
+               switch (subtype) {
+               case I40E_CEE_SUBTYPE_PG_CFG:
+                       i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+                       break;
+               case I40E_CEE_SUBTYPE_PFC_CFG:
+                       i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+                       break;
+               case I40E_CEE_SUBTYPE_APP_PRI:
+                       i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+                       break;
+               default:
+                       return; /* Invalid Sub-type return */
+               }
+               feat_tlv_count++;
+               /* Move to next sub TLV */
+               sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+                                               sizeof(sub_tlv->hdr.typelen) +
+                                               sublen);
+       }
+}
+
 /**
  * i40e_parse_org_tlv
  * @tlv: Organization specific TLV
@@ -312,6 +494,9 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
        case I40E_IEEE_8021QAZ_OUI:
                i40e_parse_ieee_tlv(tlv, dcbcfg);
                break;
+       case I40E_CEE_DCBX_OUI:
+               i40e_parse_cee_tlv(tlv, dcbcfg);
+               break;
        default:
                break;
        }
@@ -419,7 +604,7 @@ static void i40e_cee_to_dcb_v1_config(
 {
        u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status);
        u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
-       u8 i, tc, err, sync, oper;
+       u8 i, tc, err;
 
        /* CEE PG data to ETS config */
        dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -459,9 +644,7 @@ static void i40e_cee_to_dcb_v1_config(
        status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
                  I40E_AQC_CEE_APP_STATUS_SHIFT;
        err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
-       sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
-       oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
-       /* Add APPs if Error is False and Oper/Sync is True */
+       /* Add APPs if Error is False */
        if (!err) {
                /* CEE operating configuration supports FCoE/iSCSI/FIP only */
                dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
@@ -507,14 +690,17 @@ static void i40e_cee_to_dcb_config(
        /* CEE PG data to ETS config */
        dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
 
+       /* Note that the FW creates the oper_prio_tc nibbles reversed
+        * from those in the CEE Priority Group sub-TLV.
+        */
        for (i = 0; i < 4; i++) {
-               tc = (u8)((cee_cfg->oper_prio_tc[i] &
-                        I40E_CEE_PGID_PRIO_1_MASK) >>
-                        I40E_CEE_PGID_PRIO_1_SHIFT);
-               dcbcfg->etscfg.prioritytable[i*2] =  tc;
                tc = (u8)((cee_cfg->oper_prio_tc[i] &
                         I40E_CEE_PGID_PRIO_0_MASK) >>
                         I40E_CEE_PGID_PRIO_0_SHIFT);
+               dcbcfg->etscfg.prioritytable[i*2] =  tc;
+               tc = (u8)((cee_cfg->oper_prio_tc[i] &
+                        I40E_CEE_PGID_PRIO_1_MASK) >>
+                        I40E_CEE_PGID_PRIO_1_SHIFT);
                dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
        }
 
@@ -536,37 +722,85 @@ static void i40e_cee_to_dcb_config(
        dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
        dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
 
-       status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
-                 I40E_AQC_CEE_APP_STATUS_SHIFT;
+       i = 0;
+       status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+                 I40E_AQC_CEE_FCOE_STATUS_SHIFT;
        err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
        sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
        oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
-       /* Add APPs if Error is False and Oper/Sync is True */
+       /* Add FCoE APP if Error is False and Oper/Sync is True */
        if (!err && sync && oper) {
-               /* CEE operating configuration supports FCoE/iSCSI/FIP only */
-               dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
-
                /* FCoE APP */
-               dcbcfg->app[0].priority =
+               dcbcfg->app[i].priority =
                        (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
                         I40E_AQC_CEE_APP_FCOE_SHIFT;
-               dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
-               dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+               dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+               dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+               i++;
+       }
 
+       status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+                 I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+       err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+       sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+       oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+       /* Add iSCSI APP if Error is False and Oper/Sync is True */
+       if (!err && sync && oper) {
                /* iSCSI APP */
-               dcbcfg->app[1].priority =
+               dcbcfg->app[i].priority =
                        (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
                         I40E_AQC_CEE_APP_ISCSI_SHIFT;
-               dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
-               dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+               dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+               dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+               i++;
+       }
 
+       status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+                 I40E_AQC_CEE_FIP_STATUS_SHIFT;
+       err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+       sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+       oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+       /* Add FIP APP if Error is False and Oper/Sync is True */
+       if (!err && sync && oper) {
                /* FIP APP */
-               dcbcfg->app[2].priority =
+               dcbcfg->app[i].priority =
                        (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
                         I40E_AQC_CEE_APP_FIP_SHIFT;
-               dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
-               dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+               dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+               dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+               i++;
        }
+       dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+       i40e_status ret = I40E_SUCCESS;
+
+       /* IEEE mode */
+       hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+       /* Get Local DCB Config */
+       ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+                                    &hw->local_dcbx_config);
+       if (ret)
+               goto out;
+
+       /* Get Remote DCB Config */
+       ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+                                    I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+                                    &hw->remote_dcbx_config);
+       /* Don't treat ENOENT as an error for Remote MIBs */
+       if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+               ret = I40E_SUCCESS;
+
+out:
+       return ret;
 }
 
 /**
@@ -584,7 +818,7 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
        /* If Firmware version < v4.33 IEEE only */
        if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
            (hw->aq.fw_maj_ver < 4))
-               goto ieee;
+               return i40e_get_ieee_dcb_config(hw);
 
        /* If Firmware version == v4.33 use old CEE struct */
        if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
@@ -593,6 +827,8 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
                if (ret == I40E_SUCCESS) {
                        /* CEE mode */
                        hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+                       hw->local_dcbx_config.tlv_status =
+                                       LE16_TO_CPU(cee_v1_cfg.tlv_status);
                        i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
                                                  &hw->local_dcbx_config);
                }
@@ -602,6 +838,8 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
                if (ret == I40E_SUCCESS) {
                        /* CEE mode */
                        hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+                       hw->local_dcbx_config.tlv_status =
+                                       LE32_TO_CPU(cee_cfg.tlv_status);
                        i40e_cee_to_dcb_config(&cee_cfg,
                                               &hw->local_dcbx_config);
                }
@@ -609,16 +847,14 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
 
        /* CEE mode not enabled try querying IEEE data */
        if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
-               goto ieee;
-       else
+               return i40e_get_ieee_dcb_config(hw);
+
+       if (ret != I40E_SUCCESS)
                goto out;
 
-ieee:
-       /* IEEE mode */
-       hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
-       /* Get Local DCB Config */
+       /* Get CEE DCB Desired Config */
        ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
-                                    &hw->local_dcbx_config);
+                                    &hw->desired_dcbx_config);
        if (ret)
                goto out;
 
@@ -694,6 +930,334 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
        return ret;
 }
 
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+                                 struct i40e_dcbx_config *dcbcfg)
+{
+       u8 priority0, priority1, maxtcwilling = 0;
+       struct i40e_dcb_ets_config *etscfg;
+       u16 offset = 0, typelength, i;
+       u8 *buf = tlv->tlvinfo;
+       u32 ouisubtype;
+
+       typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+                       I40E_IEEE_ETS_TLV_LENGTH);
+       tlv->typelength = htons(typelength);
+
+       ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+                       I40E_IEEE_SUBTYPE_ETS_CFG);
+       tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+       /* First Octet post subtype
+        * --------------------------
+        * |will-|CBS  | Re-  | Max |
+        * |ing  |     |served| TCs |
+        * --------------------------
+        * |1bit | 1bit|3 bits|3bits|
+        */
+       etscfg = &dcbcfg->etscfg;
+       if (etscfg->willing)
+               maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+       maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+       buf[offset] = maxtcwilling;
+
+       /* Move offset to Priority Assignment Table */
+       offset++;
+
+       /* Priority Assignment Table (4 octets)
+        * Octets:|    1    |    2    |    3    |    4    |
+        *        -----------------------------------------
+        *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+        *        -----------------------------------------
+        *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+        *        -----------------------------------------
+        */
+       for (i = 0; i < 4; i++) {
+               priority0 = etscfg->prioritytable[i * 2] & 0xF;
+               priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+               buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+                               priority1;
+               offset++;
+       }
+
+       /* TC Bandwidth Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               buf[offset++] = etscfg->tcbwtable[i];
+
+       /* TSA Assignment Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               buf[offset++] = etscfg->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+                                    struct i40e_dcbx_config *dcbcfg)
+{
+       struct i40e_dcb_ets_config *etsrec;
+       u16 offset = 0, typelength, i;
+       u8 priority0, priority1;
+       u8 *buf = tlv->tlvinfo;
+       u32 ouisubtype;
+
+       typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+                       I40E_IEEE_ETS_TLV_LENGTH);
+       tlv->typelength = htons(typelength);
+
+       ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+                       I40E_IEEE_SUBTYPE_ETS_REC);
+       tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+       etsrec = &dcbcfg->etsrec;
+       /* First Octet is reserved */
+       /* Move offset to Priority Assignment Table */
+       offset++;
+
+       /* Priority Assignment Table (4 octets)
+        * Octets:|    1    |    2    |    3    |    4    |
+        *        -----------------------------------------
+        *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+        *        -----------------------------------------
+        *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+        *        -----------------------------------------
+        */
+       for (i = 0; i < 4; i++) {
+               priority0 = etsrec->prioritytable[i * 2] & 0xF;
+               priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+               buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+                               priority1;
+               offset++;
+       }
+
+       /* TC Bandwidth Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               buf[offset++] = etsrec->tcbwtable[i];
+
+       /* TSA Assignment Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               buf[offset++] = etsrec->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+                                 struct i40e_dcbx_config *dcbcfg)
+{
+       u8 *buf = tlv->tlvinfo;
+       u32 ouisubtype;
+       u16 typelength;
+
+       typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+                       I40E_IEEE_PFC_TLV_LENGTH);
+       tlv->typelength = htons(typelength);
+
+       ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+                       I40E_IEEE_SUBTYPE_PFC_CFG);
+       tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+       /* ----------------------------------------
+        * |will-|MBC  | Re-  | PFC |  PFC Enable  |
+        * |ing  |     |served| cap |              |
+        * -----------------------------------------
+        * |1bit | 1bit|2 bits|4bits| 1 octet      |
+        */
+       if (dcbcfg->pfc.willing)
+               buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+       if (dcbcfg->pfc.mbc)
+               buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+       buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+       buf[1] = dcbcfg->pfc.pfcenable;
+}
+
+/**
+ * i40e_add_ieee_app_pri_tlv -  Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+                                     struct i40e_dcbx_config *dcbcfg)
+{
+       u16 typelength, length, offset = 0;
+       u8 priority, selector, i = 0;
+       u8 *buf = tlv->tlvinfo;
+       u32 ouisubtype;
+
+       /* No APP TLVs then just return */
+       if (dcbcfg->numapps == 0)
+               return;
+       ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+                       I40E_IEEE_SUBTYPE_APP_PRI);
+       tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+       /* Move offset to App Priority Table */
+       offset++;
+       /* Application Priority Table (3 octets)
+        * Octets:|         1          |    2    |    3    |
+        *        -----------------------------------------
+        *        |Priority|Rsrvd| Sel |    Protocol ID    |
+        *        -----------------------------------------
+        *   Bits:|23    21|20 19|18 16|15                0|
+        *        -----------------------------------------
+        */
+       while (i < dcbcfg->numapps) {
+               priority = dcbcfg->app[i].priority & 0x7;
+               selector = dcbcfg->app[i].selector & 0x7;
+               buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+               buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+               buf[offset + 2] =  dcbcfg->app[i].protocolid & 0xFF;
+               /* Move to next app */
+               offset += 3;
+               i++;
+               if (i >= I40E_DCBX_MAX_APPS)
+                       break;
+       }
+       /* length includes size of ouisubtype + 1 reserved + 3*numapps */
+       length = sizeof(tlv->ouisubtype) + 1 + (i*3);
+       typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+               (length & 0x1FF));
+       tlv->typelength = htons(typelength);
+}
+
+/**
+ * i40e_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: pointer to org tlv
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: Type of IEEE TLV
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+                            struct i40e_dcbx_config *dcbcfg,
+                            u16 tlvid)
+{
+       switch (tlvid) {
+       case I40E_IEEE_TLV_ID_ETS_CFG:
+               i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+               break;
+       case I40E_IEEE_TLV_ID_ETS_REC:
+               i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+               break;
+       case I40E_IEEE_TLV_ID_PFC_CFG:
+               i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+               break;
+       case I40E_IEEE_TLV_ID_APP_PRI:
+               i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+{
+       i40e_status ret = I40E_SUCCESS;
+       struct i40e_dcbx_config *dcbcfg;
+       struct i40e_virt_mem mem;
+       u8 mib_type, *lldpmib;
+       u16 miblen;
+
+       /* update the hw local config */
+       dcbcfg = &hw->local_dcbx_config;
+       /* Allocate the LLDPDU */
+       ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+       if (ret)
+               return ret;
+
+       mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+       if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+               mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+                           SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+       }
+       lldpmib = (u8 *)mem.va;
+       ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+       ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
+
+       i40e_free_virt_mem(hw, &mem);
+       return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: store for LLDPDU data
+ * @miblen: length of the LLDPDU
+ * @dcbcfg: DCB configuration to convert
+ * send DCB configuration to FW
+ **/
+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+                                             struct i40e_dcbx_config *dcbcfg)
+{
+       u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
+       i40e_status ret = I40E_SUCCESS;
+       struct i40e_lldp_org_tlv *tlv;
+       u16 type, typelength;
+
+       tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+       while (1) {
+               i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+               typelength = ntohs(tlv->typelength);
+               type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+                               I40E_LLDP_TLV_TYPE_SHIFT);
+               length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+                               I40E_LLDP_TLV_LEN_SHIFT);
+               if (length)
+                       offset += length + 2;
+               /* END TLV or beyond LLDPDU size */
+               if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) ||
+                   (offset > I40E_LLDPDU_SIZE))
+                       break;
+               /* Move to next TLV */
+               if (length)
+                       tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+                             sizeof(tlv->typelength) + length);
+       }
+       *miblen = offset;
+       return ret;
+}
+
 /**
  * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
  * @hw: pointer to the HW structure
similarity index 81%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_dcb.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb.h
index 2ec75fb2a595249a99fc0a0fc3310519665d8fdb..0c000c964985c361040a0074cbb6516dddf61bef 100644 (file)
 #define I40E_IEEE_SUBTYPE_PFC_CFG      11
 #define I40E_IEEE_SUBTYPE_APP_PRI      12
 
+#define I40E_CEE_DCBX_OUI              0x001b21
+#define I40E_CEE_DCBX_TYPE             2
+
+#define I40E_CEE_SUBTYPE_CTRL          1
+#define I40E_CEE_SUBTYPE_PG_CFG                2
+#define I40E_CEE_SUBTYPE_PFC_CFG       3
+#define I40E_CEE_SUBTYPE_APP_PRI       4
+
+#define I40E_CEE_MAX_FEAT_TYPE         3
 #define I40E_LLDP_ADMINSTATUS_DISABLED         0
 #define I40E_LLDP_ADMINSTATUS_ENABLED_RX       1
 #define I40E_LLDP_ADMINSTATUS_ENABLED_TX       2
@@ -66,9 +75,9 @@
 #define I40E_IEEE_ETS_MAXTC_SHIFT      0
 #define I40E_IEEE_ETS_MAXTC_MASK       (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
 #define I40E_IEEE_ETS_CBS_SHIFT                6
-#define I40E_IEEE_ETS_CBS_MASK         (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK         BIT(I40E_IEEE_ETS_CBS_SHIFT)
 #define I40E_IEEE_ETS_WILLING_SHIFT    7
-#define I40E_IEEE_ETS_WILLING_MASK     (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK     BIT(I40E_IEEE_ETS_WILLING_SHIFT)
 #define I40E_IEEE_ETS_PRIO_0_SHIFT     0
 #define I40E_IEEE_ETS_PRIO_0_MASK      (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
 #define I40E_IEEE_ETS_PRIO_1_SHIFT     4
@@ -89,9 +98,9 @@
 #define I40E_IEEE_PFC_CAP_SHIFT                0
 #define I40E_IEEE_PFC_CAP_MASK         (0xF << I40E_IEEE_PFC_CAP_SHIFT)
 #define I40E_IEEE_PFC_MBC_SHIFT                6
-#define I40E_IEEE_PFC_MBC_MASK         (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK         BIT(I40E_IEEE_PFC_MBC_SHIFT)
 #define I40E_IEEE_PFC_WILLING_SHIFT    7
-#define I40E_IEEE_PFC_WILLING_MASK     (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK     BIT(I40E_IEEE_PFC_WILLING_SHIFT)
 
 /* Defines for IEEE APP TLV */
 #define I40E_IEEE_APP_SEL_SHIFT                0
@@ -128,6 +137,36 @@ struct i40e_lldp_org_tlv {
        __be32 ouisubtype;
        u8 tlvinfo[1];
 };
+
+struct i40e_cee_tlv_hdr {
+       __be16 typelen;
+       u8 operver;
+       u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+       struct i40e_cee_tlv_hdr hdr;
+       __be32 seqno;
+       __be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+       struct i40e_cee_tlv_hdr hdr;
+       u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK  0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK     0x20
+       u8 subtype;
+       u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+       __be16 protocol;
+       u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK     0x03
+       __be16 lower_oui;
+       u8 prio_map;
+};
 #pragma pack()
 
 /*
@@ -169,5 +208,8 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
                                             struct i40e_dcbx_config *dcbcfg);
 i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
 i40e_status i40e_init_dcb(struct i40e_hw *hw);
+i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+                                             struct i40e_dcbx_config *dcbcfg);
 
 #endif /* _I40E_DCB_H_ */
similarity index 94%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_dcb_nl.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb_nl.c
index 457a0e3a577c001735029af58b62bcd5cca666d6..b5d6f6cf8b7990fa217736a593f4d86f03abcfd6 100644 (file)
@@ -179,12 +179,16 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
        if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
                return;
 
+       /* MFP mode but not an iSCSI PF so return */
+       if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+               return;
+
        dcbxcfg = &hw->local_dcbx_config;
 
        /* Set up all the App TLVs if DCBx is negotiated */
        for (i = 0; i < dcbxcfg->numapps; i++) {
                prio = dcbxcfg->app[i].priority;
-               tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+               tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
 
                /* Add APP only if the TC is enabled for this VSI */
                if (tc_map & vsi->tc_config.enabled_tc) {
@@ -233,14 +237,13 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
                              struct i40e_dcb_app_priority_table *app)
 {
        int v, err;
+
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v] && pf->vsi[v]->netdev) {
                        err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
-                       if (err)
-                               dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
-                                        __func__, pf->vsi[v]->seid,
-                                        err, app->selector,
-                                        app->protocolid, app->priority);
+                       dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+                               pf->vsi[v]->seid, err, app->selector,
+                               app->protocolid, app->priority);
                }
        }
 }
@@ -283,6 +286,10 @@ void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
        struct i40e_dcb_app_priority_table app;
        int i;
 
+       /* MFP mode but not an iSCSI PF so return */
+       if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+               return;
+
        for (i = 0; i < old_cfg->numapps; i++) {
                app = old_cfg->app[i];
                /* The APP is not available anymore delete it */
similarity index 89%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_debugfs.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_debugfs.c
index 2d5074e834badeba2b93c5fd921d7d0a6e55f396..54b5d054044266e2ea515e78379ed4d46af1432b 100644 (file)
@@ -414,82 +414,82 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
        nstat = i40e_get_vsi_stats_struct(vsi);
        dev_info(&pf->pdev->dev,
                 "    net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
-                (long unsigned int)nstat->rx_packets,
-                (long unsigned int)nstat->rx_bytes,
-                (long unsigned int)nstat->rx_errors,
-                (long unsigned int)nstat->rx_dropped);
+                (unsigned long int)nstat->rx_packets,
+                (unsigned long int)nstat->rx_bytes,
+                (unsigned long int)nstat->rx_errors,
+                (unsigned long int)nstat->rx_dropped);
        dev_info(&pf->pdev->dev,
                 "    net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
-                (long unsigned int)nstat->tx_packets,
-                (long unsigned int)nstat->tx_bytes,
-                (long unsigned int)nstat->tx_errors,
-                (long unsigned int)nstat->tx_dropped);
+                (unsigned long int)nstat->tx_packets,
+                (unsigned long int)nstat->tx_bytes,
+                (unsigned long int)nstat->tx_errors,
+                (unsigned long int)nstat->tx_dropped);
        dev_info(&pf->pdev->dev,
                 "    net_stats: multicast = %lu, collisions = %lu\n",
-                (long unsigned int)nstat->multicast,
-                (long unsigned int)nstat->collisions);
+                (unsigned long int)nstat->multicast,
+                (unsigned long int)nstat->collisions);
        dev_info(&pf->pdev->dev,
                 "    net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
-                (long unsigned int)nstat->rx_length_errors,
-                (long unsigned int)nstat->rx_over_errors,
-                (long unsigned int)nstat->rx_crc_errors);
+                (unsigned long int)nstat->rx_length_errors,
+                (unsigned long int)nstat->rx_over_errors,
+                (unsigned long int)nstat->rx_crc_errors);
        dev_info(&pf->pdev->dev,
                 "    net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
-                (long unsigned int)nstat->rx_frame_errors,
-                (long unsigned int)nstat->rx_fifo_errors,
-                (long unsigned int)nstat->rx_missed_errors);
+                (unsigned long int)nstat->rx_frame_errors,
+                (unsigned long int)nstat->rx_fifo_errors,
+                (unsigned long int)nstat->rx_missed_errors);
        dev_info(&pf->pdev->dev,
                 "    net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
-                (long unsigned int)nstat->tx_aborted_errors,
-                (long unsigned int)nstat->tx_carrier_errors,
-                (long unsigned int)nstat->tx_fifo_errors);
+                (unsigned long int)nstat->tx_aborted_errors,
+                (unsigned long int)nstat->tx_carrier_errors,
+                (unsigned long int)nstat->tx_fifo_errors);
        dev_info(&pf->pdev->dev,
                 "    net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
-                (long unsigned int)nstat->tx_heartbeat_errors,
-                (long unsigned int)nstat->tx_window_errors);
+                (unsigned long int)nstat->tx_heartbeat_errors,
+                (unsigned long int)nstat->tx_window_errors);
        dev_info(&pf->pdev->dev,
                 "    net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
-                (long unsigned int)nstat->rx_compressed,
-                (long unsigned int)nstat->tx_compressed);
+                (unsigned long int)nstat->rx_compressed,
+                (unsigned long int)nstat->tx_compressed);
        dev_info(&pf->pdev->dev,
                 "    net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
-                (long unsigned int)vsi->net_stats_offsets.rx_packets,
-                (long unsigned int)vsi->net_stats_offsets.rx_bytes,
-                (long unsigned int)vsi->net_stats_offsets.rx_errors,
-                (long unsigned int)vsi->net_stats_offsets.rx_dropped);
+                (unsigned long int)vsi->net_stats_offsets.rx_packets,
+                (unsigned long int)vsi->net_stats_offsets.rx_bytes,
+                (unsigned long int)vsi->net_stats_offsets.rx_errors,
+                (unsigned long int)vsi->net_stats_offsets.rx_dropped);
        dev_info(&pf->pdev->dev,
                 "    net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
-                (long unsigned int)vsi->net_stats_offsets.tx_packets,
-                (long unsigned int)vsi->net_stats_offsets.tx_bytes,
-                (long unsigned int)vsi->net_stats_offsets.tx_errors,
-                (long unsigned int)vsi->net_stats_offsets.tx_dropped);
+                (unsigned long int)vsi->net_stats_offsets.tx_packets,
+                (unsigned long int)vsi->net_stats_offsets.tx_bytes,
+                (unsigned long int)vsi->net_stats_offsets.tx_errors,
+                (unsigned long int)vsi->net_stats_offsets.tx_dropped);
        dev_info(&pf->pdev->dev,
                 "    net_stats_offsets: multicast = %lu, collisions = %lu\n",
-                (long unsigned int)vsi->net_stats_offsets.multicast,
-                (long unsigned int)vsi->net_stats_offsets.collisions);
+                (unsigned long int)vsi->net_stats_offsets.multicast,
+                (unsigned long int)vsi->net_stats_offsets.collisions);
        dev_info(&pf->pdev->dev,
                 "    net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
-                (long unsigned int)vsi->net_stats_offsets.rx_length_errors,
-                (long unsigned int)vsi->net_stats_offsets.rx_over_errors,
-                (long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
+                (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
+                (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
+                (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
        dev_info(&pf->pdev->dev,
                 "    net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
-                (long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
-                (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
-                (long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
+                (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
+                (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
+                (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
        dev_info(&pf->pdev->dev,
                 "    net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
-                (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
-                (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
-                (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
+                (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
+                (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
+                (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
        dev_info(&pf->pdev->dev,
                 "    net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
-                (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
-                (long unsigned int)vsi->net_stats_offsets.tx_window_errors);
+                (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
+                (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
        dev_info(&pf->pdev->dev,
                 "    net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
-                (long unsigned int)vsi->net_stats_offsets.rx_compressed,
-                (long unsigned int)vsi->net_stats_offsets.tx_compressed);
+                (unsigned long int)vsi->net_stats_offsets.rx_compressed,
+                (unsigned long int)vsi->net_stats_offsets.tx_compressed);
        dev_info(&pf->pdev->dev,
                 "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
                 vsi->tx_restart, vsi->tx_busy,
@@ -497,6 +497,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
        rcu_read_lock();
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+
                if (!rx_ring)
                        continue;
 
@@ -537,7 +538,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                dev_info(&pf->pdev->dev,
                         "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
                         i, rx_ring->size,
-                        (long unsigned int)rx_ring->dma);
+                        (unsigned long int)rx_ring->dma);
                dev_info(&pf->pdev->dev,
                         "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
                         i, rx_ring->vsi,
@@ -545,6 +546,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
        }
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+
                if (!tx_ring)
                        continue;
 
@@ -583,7 +585,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                dev_info(&pf->pdev->dev,
                         "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
                         i, tx_ring->size,
-                        (long unsigned int)tx_ring->dma);
+                        (unsigned long int)tx_ring->dma);
                dev_info(&pf->pdev->dev,
                         "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
                         i, tx_ring->vsi,
@@ -755,6 +757,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
        ring = &(hw->aq.asq);
        for (i = 0; i < ring->count; i++) {
                struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
                dev_info(&pf->pdev->dev,
                         "   at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
                         i, d->flags, d->opcode, d->datalen, d->retval,
@@ -767,6 +770,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
        ring = &(hw->aq.arq);
        for (i = 0; i < ring->count; i++) {
                struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
                dev_info(&pf->pdev->dev,
                         "   ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
                         i, d->flags, d->opcode, d->datalen, d->retval,
@@ -814,8 +818,15 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
        else
                ring = *vsi->tx_rings[ring_id];
        if (cnt == 2) {
+               void *head = (struct i40e_tx_desc *)ring.desc + ring.count;
+               u32 tx_head = le32_to_cpu(*(volatile __le32 *)head);
+
                dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
                         vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
+               dev_info(&pf->pdev->dev, "head = %04x tail = %04x\n",
+                        is_rx_ring ? 0 : tx_head, readl(ring.tail));
+               dev_info(&pf->pdev->dev, "ntc = %04x ntu = %04x\n",
+                        ring.next_to_clean, ring.next_to_use);
                for (i = 0; i < ring.count; i++) {
                        if (!is_rx_ring) {
                                txd = I40E_TX_DESC(&ring, i);
@@ -901,9 +912,10 @@ static void i40e_dbg_dump_capabilities(struct i40e_pf *pf)
        dev_info(&pf->pdev->dev,
                 "    valid_functions = 0x%04x\tsr_iov_1_1 = %d\tnum_vfs = %d\tvf_base_id = %d\n",
                 p->valid_functions, p->sr_iov_1_1, p->num_vfs, p->vf_base_id);
+       dev_info(&pf->pdev->dev, "    nvm_image_type = %d\n", p->nvm_image_type);
        dev_info(&pf->pdev->dev,
-                "    num_vsis = %d\tvmdq = %d\tmfp_mode_1 = %d\tnvm_image_type = \t%d\n",
-                p->num_vsis, p->vmdq, p->mfp_mode_1, p->nvm_image_type);
+                "    num_vsis = %d\tvmdq = %d\tflex10_enable = %d\tflex10_capable = %d\n",
+                p->num_vsis, p->vmdq, p->flex10_enable, p->flex10_capable);
        dev_info(&pf->pdev->dev,
                 "    evb_802_1_qbg = %d\tevb_802_1_qbh = %d\tmgmt_cem = %d\tieee_1588 = %d\n",
                 p->evb_802_1_qbg, p->evb_802_1_qbh, p->mgmt_cem, p->ieee_1588);
@@ -958,29 +970,6 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
                 estats->tx_discards, estats->tx_errors);
 }
 
-/**
- * i40e_dbg_dump_veb_stats - dump extra veb stats
- * @pf: the i40e_pf created in command write
- * @tc_stats: the veb TC stats structure to be dumped
- **/
-static void i40e_dbg_dump_veb_stats(struct i40e_pf *pf,
-                                   struct i40e_veb_tc_stats *tc_stats)
-{
-       int i;
-
-       dev_info(&pf->pdev->dev, "  vebstats:\n");
-       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               dev_info(&pf->pdev->dev,
-                        "    tc_rx_packets[%d] = \t%lld \ttc_rx_bytes[%d] = \t%lld\n",
-                        i, tc_stats->tc_rx_packets[i],
-                        i, tc_stats->tc_rx_bytes[i]);
-               dev_info(&pf->pdev->dev,
-                        "    tc_tx_packets[%d] = \t%lld \ttc_tx_bytes[%d] = \t%lld\n",
-                        i, tc_stats->tc_tx_packets[i],
-                        i, tc_stats->tc_tx_bytes[i]);
-       }
-}
-
 /**
  * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
  * @pf: the i40e_pf created in command write
@@ -1025,7 +1014,6 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
                         veb->bw_tc_limit_credits[i], veb->bw_tc_max_quanta[i]);
        }
        i40e_dbg_dump_eth_stats(pf, &veb->stats);
-       i40e_dbg_dump_veb_stats(pf, &veb->tc_stats);
 }
 
 /**
@@ -1058,7 +1046,7 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
                dev_info(&pf->pdev->dev, "no VFs allocated\n");
        } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
                vf = &pf->vf[vf_id];
-               vsi = pf->vsi[vf->lan_vsi_index];
+               vsi = pf->vsi[vf->lan_vsi_idx];
                dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
                         vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
                dev_info(&pf->pdev->dev, "       num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
@@ -1086,23 +1074,53 @@ static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
 }
 
 /**
- * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
- * @pf: the PF that would be altered
- * @flag: flag that needs enabling or disabling
- * @enable: Enable/disable FD SD/ATR
+ * i40e_dbg_dump_dcb_cfg - Dump DCB config data struct
+ * @pf: the corresponding PF
+ * @cfg: DCB Config data structure
+ * @prefix: Prefix string
  **/
-static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
+static void i40e_dbg_dump_dcb_cfg(struct i40e_pf *pf,
+                                 struct i40e_dcbx_config *cfg,
+                                 char *prefix)
 {
-       if (enable) {
-               pf->flags |= flag;
-       } else {
-               pf->flags &= ~flag;
-               pf->auto_disable_flags |= flag;
+       int i;
+
+       dev_info(&pf->pdev->dev,
+                "%s ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+                prefix, cfg->etscfg.willing, cfg->etscfg.cbs,
+                cfg->etscfg.maxtcs);
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev, "%s ets_cfg: up=%d tc=%d\n",
+                        prefix, i, cfg->etscfg.prioritytable[i]);
        }
-       dev_info(&pf->pdev->dev, "requesting a PF reset\n");
-       i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
-}
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev, "%s ets_cfg: tc=%d tcbw=%d tctsa=%d\n",
+                        prefix, i, cfg->etscfg.tcbwtable[i],
+                        cfg->etscfg.tsatable[i]);
+       }
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev, "%s ets_rec: up=%d tc=%d\n",
+                        prefix, i, cfg->etsrec.prioritytable[i]);
+       }
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev, "%s ets_rec: tc=%d tcbw=%d tctsa=%d\n",
+                        prefix, i, cfg->etsrec.tcbwtable[i],
+                        cfg->etsrec.tsatable[i]);
+       }
+       dev_info(&pf->pdev->dev,
+                "%s pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+                prefix, cfg->pfc.willing, cfg->pfc.mbc,
+                cfg->pfc.pfccap, cfg->pfc.pfcenable);
 
+       dev_info(&pf->pdev->dev,
+                "%s app_table: num_apps=%d\n", prefix, cfg->numapps);
+       for (i = 0; i < cfg->numapps; i++) {
+               dev_info(&pf->pdev->dev, "%s app_table: %d prio=%d selector=%d protocol=0x%x\n",
+                        prefix, i, cfg->app[i].priority,
+                        cfg->app[i].selector,
+                        cfg->app[i].protocolid);
+       }
+}
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
 /**
  * i40e_dbg_command_write - write into command datum
@@ -1132,8 +1150,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        if (!cmd_buf)
                return count;
        bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
-       if (bytes_not_copied < 0)
+       if (bytes_not_copied < 0) {
+               kfree(cmd_buf);
                return bytes_not_copied;
+       }
        if (bytes_not_copied > 0)
                count -= bytes_not_copied;
        cmd_buf[count] = '\0';
@@ -1147,6 +1167,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        if (strncmp(cmd_buf, "read", 4) == 0) {
                u32 address;
                u32 value;
+
                cnt = sscanf(&cmd_buf[4], "%i", &address);
                if (cnt != 1) {
                        dev_info(&pf->pdev->dev, "read <reg>\n");
@@ -1154,9 +1175,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                }
 
                /* check the range on address */
-               if (address >= I40E_MAX_REGISTER) {
-                       dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n",
-                                address);
+               if (address > (pf->ioremap_len - sizeof(u32))) {
+                       dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
+                                address, (pf->ioremap_len - sizeof(u32)));
                        goto command_write_done;
                }
 
@@ -1166,6 +1187,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 
        } else if (strncmp(cmd_buf, "write", 5) == 0) {
                u32 address, value;
+
                cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
                if (cnt != 2) {
                        dev_info(&pf->pdev->dev, "write <reg> <value>\n");
@@ -1173,9 +1195,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                }
 
                /* check the range on address */
-               if (address >= I40E_MAX_REGISTER) {
-                       dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n",
-                                address);
+               if (address > (pf->ioremap_len - sizeof(u32))) {
+                       dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
+                                address, (pf->ioremap_len - sizeof(u32)));
                        goto command_write_done;
                }
                wr32(&pf->hw, address, value);
@@ -1201,7 +1223,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
                        pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
                        i40e_do_reset_safe(pf,
-                                          (1 << __I40E_PF_RESET_REQUESTED));
+                                          BIT_ULL(__I40E_PF_RESET_REQUESTED));
                }
 
                vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
@@ -1212,7 +1234,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
 
        } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
-               sscanf(&cmd_buf[7], "%i", &vsi_seid);
+               cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+               if (cnt != 1) {
+                       dev_info(&pf->pdev->dev,
+                                "del vsi: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               }
                vsi = i40e_dbg_find_vsi(pf, vsi_seid);
                if (!vsi) {
                        dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
@@ -1267,6 +1295,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 
        } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
                int i;
+
                cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
                if (cnt != 1) {
                        dev_info(&pf->pdev->dev,
@@ -1319,8 +1348,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        goto command_write_done;
                }
 
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                f = i40e_add_filter(vsi, ma, vlan, false, false);
-               ret = i40e_sync_vsi_filters(vsi);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+               ret = i40e_sync_vsi_filters(vsi, true);
                if (f && !ret)
                        dev_info(&pf->pdev->dev,
                                 "add macaddr: %pM vlan=%d added to VSI %d\n",
@@ -1356,8 +1387,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        goto command_write_done;
                }
 
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                i40e_del_filter(vsi, ma, vlan, false, false);
-               ret = i40e_sync_vsi_filters(vsi);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+               ret = i40e_sync_vsi_filters(vsi, true);
                if (!ret)
                        dev_info(&pf->pdev->dev,
                                 "del macaddr: %pM vlan=%d removed from VSI %d\n",
@@ -1443,6 +1476,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                i40e_dbg_dump_vf_all(pf);
                } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
                        int ring_id, desc_n;
+
                        if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
                                cnt = sscanf(&cmd_buf[12], "%i %i %i",
                                             &vsi_seid, &ring_id, &desc_n);
@@ -1481,6 +1515,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                                &pf->hw.local_dcbx_config;
                        struct i40e_dcbx_config *r_cfg =
                                                &pf->hw.remote_dcbx_config;
+                       struct i40e_dcbx_config *d_cfg =
+                                               &pf->hw.desired_dcbx_config;
                        int i, ret;
 
                        bw_data = kzalloc(sizeof(
@@ -1517,68 +1553,62 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        kfree(bw_data);
                        bw_data = NULL;
 
-                       dev_info(&pf->pdev->dev,
-                                "port dcbx_mode=%d\n", cfg->dcbx_mode);
-                       dev_info(&pf->pdev->dev,
-                                "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
-                                cfg->etscfg.willing, cfg->etscfg.cbs,
-                                cfg->etscfg.maxtcs);
-                       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                               dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
-                                        i, cfg->etscfg.prioritytable[i],
-                                        cfg->etscfg.tcbwtable[i],
-                                        cfg->etscfg.tsatable[i]);
-                       }
-                       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                               dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
-                                        i, cfg->etsrec.prioritytable[i],
-                                        cfg->etsrec.tcbwtable[i],
-                                        cfg->etsrec.tsatable[i]);
+                       if (cfg->dcbx_mode == I40E_DCBX_MODE_CEE) {
+                               dev_info(&pf->pdev->dev,
+                                        "CEE DCBX mode with Oper TLV Status = 0x%x\n",
+                                        cfg->tlv_status);
+                               i40e_dbg_dump_dcb_cfg(pf, d_cfg, "DesiredCfg");
+                       } else {
+                               dev_info(&pf->pdev->dev, "IEEE DCBX mode\n");
                        }
-                       dev_info(&pf->pdev->dev,
-                                "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
-                                cfg->pfc.willing, cfg->pfc.mbc,
-                                cfg->pfc.pfccap, cfg->pfc.pfcenable);
-                       dev_info(&pf->pdev->dev,
-                                "port app_table: num_apps=%d\n", cfg->numapps);
-                       for (i = 0; i < cfg->numapps; i++) {
-                               dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
-                                        i, cfg->app[i].priority,
-                                        cfg->app[i].selector,
-                                        cfg->app[i].protocolid);
+
+                       i40e_dbg_dump_dcb_cfg(pf, cfg, "OperCfg");
+                       i40e_dbg_dump_dcb_cfg(pf, r_cfg, "PeerCfg");
+
+               } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
+                       int cluster_id, table_id;
+                       int index, ret;
+                       u16 buff_len = 4096;
+                       u32 next_index;
+                       u8 next_table;
+                       u8 *buff;
+                       u16 rlen;
+
+                       cnt = sscanf(&cmd_buf[18], "%i %i %i",
+                                    &cluster_id, &table_id, &index);
+                       if (cnt != 3) {
+                               dev_info(&pf->pdev->dev,
+                                        "dump debug fwdata <cluster_id> <table_id> <index>\n");
+                               goto command_write_done;
                        }
-                       /* Peer TLV DCBX data */
+
                        dev_info(&pf->pdev->dev,
-                                "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
-                                r_cfg->etscfg.willing,
-                                r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
-                       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                               dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
-                                        i, r_cfg->etscfg.prioritytable[i],
-                                        r_cfg->etscfg.tcbwtable[i],
-                                        r_cfg->etscfg.tsatable[i]);
-                       }
-                       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                               dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
-                                        i, r_cfg->etsrec.prioritytable[i],
-                                        r_cfg->etsrec.tcbwtable[i],
-                                        r_cfg->etsrec.tsatable[i]);
+                                "AQ debug dump fwdata params %x %x %x %x\n",
+                                cluster_id, table_id, index, buff_len);
+                       buff = kzalloc(buff_len, GFP_KERNEL);
+                       if (!buff)
+                               goto command_write_done;
+
+                       ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
+                                                index, buff_len, buff, &rlen,
+                                                &next_table, &next_index,
+                                                NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "debug dump fwdata AQ Failed %d 0x%x\n",
+                                        ret, pf->hw.aq.asq_last_status);
+                               kfree(buff);
+                               buff = NULL;
+                               goto command_write_done;
                        }
                        dev_info(&pf->pdev->dev,
-                                "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
-                                r_cfg->pfc.willing,
-                                r_cfg->pfc.mbc,
-                                r_cfg->pfc.pfccap,
-                                r_cfg->pfc.pfcenable);
-                       dev_info(&pf->pdev->dev,
-                                "remote port app_table: num_apps=%d\n",
-                                r_cfg->numapps);
-                       for (i = 0; i < r_cfg->numapps; i++) {
-                               dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
-                                        i, r_cfg->app[i].priority,
-                                        r_cfg->app[i].selector,
-                                        r_cfg->app[i].protocolid);
-                       }
+                                "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
+                                rlen, next_table, next_index);
+                       print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+                                      DUMP_PREFIX_OFFSET, 16, 1,
+                                      buff, rlen, true);
+                       kfree(buff);
+                       buff = NULL;
                } else {
                        dev_info(&pf->pdev->dev,
                                 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
@@ -1594,6 +1624,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 
        } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
                u32 level;
+
                cnt = sscanf(&cmd_buf[10], "%i", &level);
                if (cnt) {
                        if (I40E_DEBUG_USER & level) {
@@ -1611,29 +1642,30 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                }
        } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "corer", 5) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "globr", 5) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
        } else if (strncmp(cmd_buf, "defport on", 10) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing PFR with defport enabled\n");
                pf->cur_promisc = true;
-               i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
        } else if (strncmp(cmd_buf, "defport off", 11) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing PFR with defport disabled\n");
                pf->cur_promisc = false;
-               i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
                if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
                        cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
                        if (cnt == 0) {
                                int i;
+
                                for (i = 0; i < pf->num_alloc_vsi; i++)
                                        i40e_vsi_reset_stats(pf->vsi[i]);
                                dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
@@ -1833,8 +1865,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                printk("packet in ascii %s\n", asc_packet);
 
                for (i = 0; i < packet_len; i++) {
-                       sscanf(&asc_packet[j], "%2hhx ",
-                              &raw_packet[i]);
+                       cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
+                       if (!cnt)
+                               break;
                        j += 3;
                }
                dev_info(&pf->pdev->dev, "FD raw packet dump\n");
@@ -1852,13 +1885,42 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                raw_packet = NULL;
                kfree(asc_packet);
                asc_packet = NULL;
-       } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
-               i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
-       } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
-               i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
        } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
                dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
                         i40e_get_current_fd_count(pf));
+       } else if ((strncmp(cmd_buf, "add ethtype filter", 18) == 0) ||
+                  (strncmp(cmd_buf, "rem ethtype filter", 18) == 0)) {
+               u16 ethtype;
+               u16 queue;
+               bool add = false;
+               int ret;
+
+               if (strncmp(cmd_buf, "add", 3) == 0)
+                       add = true;
+
+               cnt = sscanf(&cmd_buf[18],
+                            "%hi %hi",
+                            &ethtype, &queue);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev,
+                                "%s ethtype filter: bad command string, cnt=%d\n",
+                                add ? "add" : "rem",
+                                cnt);
+                       goto command_write_done;
+               }
+               ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+                                       pf->hw.mac.addr,
+                                       ethtype, 0,
+                                       pf->vsi[pf->lan_vsi]->seid,
+                                       queue, add, NULL, NULL);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                               "%s: add/rem Control Packet Filter AQ command failed =0x%x\n",
+                               add ? "add" : "rem",
+                               pf->hw.aq.asq_last_status);
+                       goto command_write_done;
+               }
+
        } else if (strncmp(cmd_buf, "dcb off", 7) == 0) {
                u8 tc = i40e_pf_get_num_tc(pf);
                /* Allow disabling only when in single TC mode */
@@ -1873,6 +1935,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
                if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
                        int ret;
+
                        ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
                        if (ret) {
                                dev_info(&pf->pdev->dev,
@@ -1899,6 +1962,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 #endif /* CONFIG_DCB */
                } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
                        int ret;
+
                        ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
                                                pf->hw.mac.addr,
                                                I40E_ETH_P_LLDP, 0,
@@ -1929,6 +1993,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        u16 llen, rlen;
                        int ret;
                        u8 *buff;
+
                        buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
                        if (!buff)
                                goto command_write_done;
@@ -1955,6 +2020,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        u16 llen, rlen;
                        int ret;
                        u8 *buff;
+
                        buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
                        if (!buff)
                                goto command_write_done;
@@ -1980,6 +2046,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        buff = NULL;
                } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
                        int ret;
+
                        ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
                                                                true, NULL);
                        if (ret) {
@@ -1990,6 +2057,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        }
                } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
                        int ret;
+
                        ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
                                                                false, NULL);
                        if (ret) {
@@ -2062,22 +2130,31 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                kfree(buff);
                buff = NULL;
        } else if (strncmp(cmd_buf, "set rss_size", 12) == 0) {
-               u16 queue_count;
-               cnt = sscanf(&cmd_buf[12], "%hi", &queue_count);
+               int q_count;
+
+               cnt = sscanf(&cmd_buf[12], "%i", &q_count);
                if (cnt != 1) {
                        dev_info(&pf->pdev->dev,
                                 "set rss_size: bad command string, cnt=%d\n", cnt);
                        goto command_write_done;
                }
-               dev_info(&pf->pdev->dev, " set rss_size requesting %d queues\n", queue_count);
+               if (q_count <= 0) {
+                       dev_info(&pf->pdev->dev,
+                                "set rss_size: %d is too small\n",
+                                q_count);
+                       goto command_write_done;
+               }
+               dev_info(&pf->pdev->dev,
+                        "set rss_size requesting %d queues\n", q_count);
                rtnl_lock();
-               i40e_reconfig_rss_queues(pf, queue_count);
+               i40e_reconfig_rss_queues(pf, q_count);
                rtnl_unlock();
-               dev_info(&pf->pdev->dev, " new rss_size %d\n", pf->rss_size);
+               dev_info(&pf->pdev->dev, "new rss_size %d\n", pf->rss_size);
        } else if (strncmp(cmd_buf, "get bw", 6) == 0) {
                i40e_status status;
                u32 max_bw, min_bw;
                bool min_valid, max_valid;
+
                status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
                                                   &min_valid, &max_valid);
 
@@ -2110,7 +2187,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                u32 max_bw, min_bw;
 
                /* Set the valid bit for this PF */
-               bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+               bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
 
                /* Get the bw's */
                cnt = sscanf(&cmd_buf[7], "%d %d", &max_bw, &min_bw);
@@ -2211,6 +2288,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
                dev_info(&pf->pdev->dev, "  dump desc aq\n");
                dev_info(&pf->pdev->dev, "  dump reset stats\n");
+               dev_info(&pf->pdev->dev, "  dump debug fwdata <cluster_id> <table_id> <index>\n");
                dev_info(&pf->pdev->dev, "  msg_enable [level]\n");
                dev_info(&pf->pdev->dev, "  read <reg>\n");
                dev_info(&pf->pdev->dev, "  write <reg> <value>\n");
@@ -2225,9 +2303,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
                dev_info(&pf->pdev->dev, "  add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
                dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
-               dev_info(&pf->pdev->dev, "  fd-atr off\n");
-               dev_info(&pf->pdev->dev, "  fd-atr on\n");
                dev_info(&pf->pdev->dev, "  fd current cnt");
+               dev_info(&pf->pdev->dev, "  add ethtype filter <ethtype> <to_queue>");
+               dev_info(&pf->pdev->dev, "  rem ethtype filter <ethtype> <to_queue>");
                dev_info(&pf->pdev->dev, "  lldp start\n");
                dev_info(&pf->pdev->dev, "  lldp stop\n");
                dev_info(&pf->pdev->dev, "  lldp get local\n");
@@ -2367,6 +2445,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
                }
        } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
                int mtu;
+
                cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
                             &vsi_seid, &mtu);
                if (cnt != 2) {
@@ -2502,7 +2581,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf)
 create_failed:
        dev_info(dev, "debugfs dir/file for %s failed\n", name);
        debugfs_remove_recursive(pf->i40e_dbg_pf);
-       return;
 }
 
 /**
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_devids.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_devids.h
new file mode 100644 (file)
index 0000000..eeaa4d8
--- /dev/null
@@ -0,0 +1,51 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#define _I40E_DEVIDS_H_
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID           0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710          0x1572
+#define I40E_DEV_ID_QEMU               0x1574
+#define I40E_DEV_ID_KX_A               0x157F
+#define I40E_DEV_ID_KX_B               0x1580
+#define I40E_DEV_ID_KX_C               0x1581
+#define I40E_DEV_ID_QSFP_A             0x1583
+#define I40E_DEV_ID_QSFP_B             0x1584
+#define I40E_DEV_ID_QSFP_C             0x1585
+#define I40E_DEV_ID_10G_BASE_T         0x1586
+#define I40E_DEV_ID_20G_KR2            0x1587
+#define I40E_DEV_ID_20G_KR2_A          0x1588
+#define I40E_DEV_ID_10G_BASE_T4                0x1589
+#define I40E_DEV_ID_VF                 0x154C
+#define I40E_DEV_ID_VF_HV              0x1571
+
+#define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
+                                        (d) == I40E_DEV_ID_QSFP_B  || \
+                                        (d) == I40E_DEV_ID_QSFP_C)
+
similarity index 96%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_diag.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_diag.c
index 7d4289d2ccb2a3ce319053c81e50379e91876c8d..e9e65d7dc748d072ef3e2e0d544fcb2cf39e54f1 100644 (file)
@@ -159,13 +159,10 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
        ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
        if ((ret_code == I40E_SUCCESS) &&
            ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
-            (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
-               ret_code = i40e_validate_nvm_checksum(hw, NULL);
-       } else {
-               ret_code = I40E_ERR_DIAG_TEST_FAILED;
-       }
-
-       return ret_code;
+            BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+               return i40e_validate_nvm_checksum(hw, NULL);
+       else
+               return I40E_ERR_DIAG_TEST_FAILED;
 }
 
 /**
similarity index 75%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_ethtool.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_ethtool.c
index 106d0fb5aa74168626126d405ff898d2cceb334f..db4565145c674dee7ed955206f213046d781e30a 100644 (file)
@@ -98,11 +98,9 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
        I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
        I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
        I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+       I40E_VSI_STAT("tx_linearize", tx_linearize),
 };
 
-static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
-                                struct ethtool_rxnfc *cmd);
-
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
  * but they are separate.  This device supports Virtualization, and
  * as such might have several netdevs supporting VMDq and FCoE going
@@ -125,7 +123,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
        I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
        I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
-       I40E_PF_STAT("crc_errors", stats.crc_errors),
+       I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
        I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
        I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
        I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
@@ -160,7 +158,10 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 #endif /* HAVE_PTP_1588_CLOCK */
        I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
        I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+       I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+       I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
        I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+       I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
 #ifdef I40E_ADD_PROBES
        I40E_PF_STAT("tx_tcp_segments", tcp_segs),
        I40E_PF_STAT("tx_tcp_cso", tx_tcp_cso),
@@ -222,7 +223,14 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
                 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
                 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
                 / sizeof(u64))
+#define I40E_VEB_TC_STATS_LEN ( \
+               (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
+                FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
+                FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
+                FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
+                / sizeof(u64))
 #define I40E_VEB_STATS_LEN   ARRAY_SIZE(i40e_gstrings_veb_stats)
+#define I40E_VEB_STATS_TOTAL   (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
 #define I40E_PF_STATS_LEN(n)   (I40E_GLOBAL_STATS_LEN + \
                                 I40E_PFC_STATS_LEN + \
                                 I40E_VSI_STATS_LEN((n)))
@@ -251,11 +259,13 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 
 #ifdef HAVE_ETHTOOL_GET_SSET_COUNT
 static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
-       "NPAR",
+       "MFP",
+       "LinkPolling",
+       "flow-director-atr",
+       "veb-stats",
 };
 
-#define I40E_PRIV_FLAGS_STR_LEN \
-       (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
 
 #endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
 
@@ -298,65 +308,46 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
        case I40E_PHY_TYPE_40GBASE_AOC:
                ecmd->supported = SUPPORTED_40000baseCR4_Full;
                break;
-       case I40E_PHY_TYPE_40GBASE_KR4:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_40000baseKR4_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_40000baseKR4_Full;
-               break;
        case I40E_PHY_TYPE_40GBASE_SR4:
                ecmd->supported = SUPPORTED_40000baseSR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_LR4:
                ecmd->supported = SUPPORTED_40000baseLR4_Full;
                break;
-       case I40E_PHY_TYPE_20GBASE_KR2:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_20000baseKR2_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_20000baseKR2_Full;
-               break;
-       case I40E_PHY_TYPE_10GBASE_KX4:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_10000baseKX4_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_10000baseKX4_Full;
-               break;
-       case I40E_PHY_TYPE_10GBASE_KR:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_10000baseKR_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_10000baseKR_Full;
-               break;
        case I40E_PHY_TYPE_10GBASE_SR:
        case I40E_PHY_TYPE_10GBASE_LR:
        case I40E_PHY_TYPE_1000BASE_SX:
        case I40E_PHY_TYPE_1000BASE_LX:
-               ecmd->supported = SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full;
+               ecmd->supported = SUPPORTED_10000baseT_Full;
+               if (hw_link_info->module_type[2] & I40E_MODULE_TYPE_1000BASE_SX ||
+                   hw_link_info->module_type[2] & I40E_MODULE_TYPE_1000BASE_LX) {
+                       ecmd->supported |= SUPPORTED_1000baseT_Full;
+                       if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               }
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               break;
-       case I40E_PHY_TYPE_1000BASE_KX:
-               ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_1000baseKX_Full;
-               ecmd->advertising = ADVERTISED_Autoneg |
-                                   ADVERTISED_1000baseKX_Full;
                break;
        case I40E_PHY_TYPE_10GBASE_T:
        case I40E_PHY_TYPE_1000BASE_T:
-       case I40E_PHY_TYPE_100BASE_TX:
                ecmd->supported = SUPPORTED_Autoneg |
                                  SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full |
-                                 SUPPORTED_100baseT_Full;
+                                 SUPPORTED_1000baseT_Full;
                ecmd->advertising = ADVERTISED_Autoneg;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_1000baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_1000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_100BASE_TX:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_100baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
                        ecmd->advertising |= ADVERTISED_100baseT_Full;
                break;
@@ -376,12 +367,9 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                break;
        case I40E_PHY_TYPE_SGMII:
                ecmd->supported = SUPPORTED_Autoneg |
-                                 SUPPORTED_1000baseT_Full |
-                                 SUPPORTED_100baseT_Full;
+                                 SUPPORTED_1000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
-                       ecmd->advertising |= ADVERTISED_100baseT_Full;
                break;
        default:
                /* if we got here and link is up something bad is afoot */
@@ -392,8 +380,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
        /* Set speed and duplex */
        switch (link_speed) {
        case I40E_LINK_SPEED_40GB:
-               /* need a SPEED_40000 in ethtool.h */
-               ethtool_cmd_speed_set(ecmd, 40000);
+               ethtool_cmd_speed_set(ecmd, SPEED_40000);
                break;
        case I40E_LINK_SPEED_20GB:
                ethtool_cmd_speed_set(ecmd, SPEED_20000);
@@ -414,7 +401,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
 }
 
 /**
- * i40e_get_settings_link_down - Get the Link settings for when link is down 
+ * i40e_get_settings_link_down - Get the Link settings for when link is down
  * @hw: hw structure
  * @ecmd: ethtool command to fill in
  *
@@ -423,80 +410,66 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
 static void i40e_get_settings_link_down(struct i40e_hw *hw,
                                      struct ethtool_cmd *ecmd)
 {
-       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+       enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
 
        /* link is down and the driver needs to fall back on
-        * device ID to determine what kinds of info to display,
-        * it's mostly a guess that may change when link is up
+        * supported phy types to figure out what info to display
         */
-       switch(hw->device_id) {
-       case I40E_DEV_ID_QSFP_A:
-       case I40E_DEV_ID_QSFP_B:
-       case I40E_DEV_ID_QSFP_C:
-               /* pluggable QSFP */
-               ecmd->supported = SUPPORTED_40000baseSR4_Full |
-                                 SUPPORTED_40000baseCR4_Full |
-                                 SUPPORTED_40000baseLR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseSR4_Full |
-                                   ADVERTISED_40000baseCR4_Full |
-                                   ADVERTISED_40000baseLR4_Full;
-               break;
-       case I40E_DEV_ID_KX_B:
-               /* backplane 40G */
-               ecmd->supported = SUPPORTED_40000baseKR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseKR4_Full;
-               break;
-       case I40E_DEV_ID_KX_C:
-               /* backplane 10G */
-               ecmd->supported = SUPPORTED_10000baseKR_Full;
-               ecmd->advertising = ADVERTISED_10000baseKR_Full;
-               break;
-       case I40E_DEV_ID_10G_BASE_T:
-               ecmd->supported = SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full |
-                                 SUPPORTED_100baseT_Full;
-               break;
-               /* If we haven't requested speeds then default
-                * to everything supported
-                */
-               if (!hw_link_info->requested_speeds) {
-                       ecmd->advertising |= ADVERTISED_10000baseT_Full |
-                                            ADVERTISED_1000baseT_Full |
-                                            ADVERTISED_100baseT_Full;
-                       break;
-               }
-               /* Otherwise figure out what has been requested */
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
-                       ecmd->advertising |= ADVERTISED_100baseT_Full;
-               break;
-       case I40E_DEV_ID_20G_KR2:
-               /* backplane 20G */
-               ecmd->supported = SUPPORTED_20000baseKR2_Full;
-               ecmd->advertising = ADVERTISED_20000baseKR2_Full;
-               break;
-       default:
-               /* all the rest are 10G/1G */
-               ecmd->supported = SUPPORTED_10000baseT_Full |
-                                 SUPPORTED_1000baseT_Full;
-               /* If we haven't requested speeds then default
-                * to everything supported
-                */
-               if (!hw_link_info->requested_speeds) {
-                       ecmd->advertising |= ADVERTISED_10000baseT_Full |
-                                            ADVERTISED_1000baseT_Full;
-                       break;
-               }
-               /* Otherwise figure out what has been requested */
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
-               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               break;
+       ecmd->supported = 0x0;
+       ecmd->advertising = 0x0;
+       if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                               SUPPORTED_1000baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_1000baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+           phy_types & I40E_CAP_PHY_TYPE_XFI ||
+           phy_types & I40E_CAP_PHY_TYPE_SFI ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+               ecmd->supported |= SUPPORTED_10000baseT_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_10000baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_10000baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+           phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+           phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+               ecmd->supported |= SUPPORTED_40000baseCR4_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                 SUPPORTED_40000baseCR4_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                   ADVERTISED_40000baseCR4_Full;
+       }
+       if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+           !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_100baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_100baseT_Full;
        }
+       if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_1000baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_1000baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+               ecmd->supported |= SUPPORTED_40000baseSR4_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+               ecmd->supported |= SUPPORTED_40000baseLR4_Full;
 
        /* With no link speed and duplex are unknown */
        ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
@@ -526,6 +499,37 @@ static int i40e_get_settings(struct net_device *netdev,
 
        /* Now set the settings that don't rely on link being up/down */
 
+       /* For backplane, supported and advertised are only reliant on the
+        * phy types the NVM specifies are supported.
+        */
+       if (hw->device_id == I40E_DEV_ID_KX_B ||
+           hw->device_id == I40E_DEV_ID_KX_C ||
+           hw->device_id == I40E_DEV_ID_20G_KR2 ||
+           hw->device_id ==  I40E_DEV_ID_20G_KR2_A) {
+               ecmd->supported = SUPPORTED_Autoneg;
+               ecmd->advertising = ADVERTISED_Autoneg;
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+                       ecmd->supported |= SUPPORTED_40000baseKR4_Full;
+                       ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+                       ecmd->supported |= SUPPORTED_20000baseKR2_Full;
+                       ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+                       ecmd->supported |= SUPPORTED_10000baseKR_Full;
+                       ecmd->advertising |= ADVERTISED_10000baseKR_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+                       ecmd->supported |= SUPPORTED_10000baseKX4_Full;
+                       ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+                       ecmd->supported |= SUPPORTED_1000baseKX_Full;
+                       ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+               }
+       }
+
        /* Set autoneg settings */
        ecmd->autoneg = (hw_link_info->an_info & I40E_AQ_AN_COMPLETED ?
                          AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -627,6 +631,14 @@ static int i40e_set_settings(struct net_device *netdev,
            hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
                return -EOPNOTSUPP;
 
+       if (hw->device_id == I40E_DEV_ID_KX_B ||
+           hw->device_id == I40E_DEV_ID_KX_C ||
+           hw->device_id == I40E_DEV_ID_20G_KR2 ||
+           hw->device_id == I40E_DEV_ID_20G_KR2_A) {
+               netdev_info(netdev, "Changing settings is not supported on backplane.\n");
+               return -EOPNOTSUPP;
+       }
+
        /* get our own copy of the bits to check against */
        memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
        i40e_get_settings(netdev, &safe_ecmd);
@@ -680,28 +692,30 @@ static int i40e_set_settings(struct net_device *netdev,
 
        /* Check autoneg */
        if (autoneg == AUTONEG_ENABLE) {
-               /* If autoneg is not supported, return error */
-               if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
-                       netdev_info(netdev, "Autoneg not supported on this phy\n");
-                       return -EINVAL;
-               }
                /* If autoneg was not already enabled */
                if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+                       /* If autoneg is not supported, return error */
+                       if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+                               netdev_info(netdev, "Autoneg not supported on this phy\n");
+                               return -EINVAL;
+                       }
+                       /* Autoneg is allowed to change */
                        config.abilities = abilities.abilities |
                                           I40E_AQ_PHY_ENABLE_AN;
                        change = true;
                }
        } else {
-               /* If autoneg is supported 10GBASE_T is the only phy that
-                * can disable it, so otherwise return error
-                */
-               if (safe_ecmd.supported & SUPPORTED_Autoneg &&
-                   hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
-                       netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
-                       return -EINVAL;
-               }
                /* If autoneg is currently enabled */
                if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+                       /* If autoneg is supported 10GBASE_T is the only phy that
+                        * can disable it, so otherwise return error
+                        */
+                       if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+                           hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
+                               netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+                               return -EINVAL;
+                       }
+                       /* Autoneg is allowed to change */
                        config.abilities = abilities.abilities &
                                           ~I40E_AQ_PHY_ENABLE_AN;
                        change = true;
@@ -728,6 +742,13 @@ static int i40e_set_settings(struct net_device *netdev,
            advertise & ADVERTISED_40000baseLR4_Full)
                config.link_speed |= I40E_LINK_SPEED_40GB;
 
+       /* If speed didn't get set, set it to what it currently is.
+        * This is needed because if advertise is 0 (as it is when autoneg
+        * is disabled) then speed won't get set.
+        */
+       if (!config.link_speed)
+               config.link_speed = abilities.link_speed;
+
        if (change || (abilities.link_speed != config.link_speed)) {
                /* copy over the rest of the abilities */
                config.phy_type = abilities.phy_type;
@@ -744,7 +765,7 @@ static int i40e_set_settings(struct net_device *netdev,
                        /* Tell the OS link is going down, the link will go back up when fw
                         * says it is ready asynchronously
                         */
-                       netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
+                       i40e_print_link_message(vsi, false);
                        netif_carrier_off(netdev);
                        netif_tx_stop_all_queues(netdev);
                }
@@ -752,15 +773,17 @@ static int i40e_set_settings(struct net_device *netdev,
                /* make the aq call */
                status = i40e_aq_set_phy_config(hw, &config, NULL);
                if (status) {
-                       netdev_info(netdev, "Set phy config failed with error %d.\n",
-                                       status);
+                       netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+                                   i40e_stat_str(hw, status),
+                                   i40e_aq_str(hw, hw->aq.asq_last_status));
                        return -EAGAIN;
                }
 
-               status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+               status = i40e_update_link_info(hw);
                if (status)
-                       netdev_info(netdev, "Updating link info failed with error %d\n",
-                                       status);
+                       netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
+                                   i40e_stat_str(hw, status),
+                                   i40e_aq_str(hw, hw->aq.asq_last_status));
 
        } else {
                netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -780,8 +803,9 @@ static int i40e_nway_reset(struct net_device *netdev)
 
        ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
        if (ret) {
-               netdev_info(netdev, "link restart failed, aq_err=%d\n",
-                           pf->hw.aq.asq_last_status);
+               netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+                           i40e_stat_str(hw, ret),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                return -EIO;
        }
 
@@ -883,7 +907,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
        /* Tell the OS link is going down, the link will go back up when fw
         * says it is ready asynchronously
         */
-       netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
+       i40e_print_link_message(vsi, false);
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
 
@@ -891,18 +915,21 @@ static int i40e_set_pauseparam(struct net_device *netdev,
        status = i40e_set_fc(hw, &aq_failures, link_up);
 
        if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
-               netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
-                        status, hw->aq.asq_last_status);
+               netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+                        i40e_stat_str(hw, status),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
-               netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
-                        status, hw->aq.asq_last_status);
+               netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+                        i40e_stat_str(hw, status),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
-               netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
-                        status, hw->aq.asq_last_status);
+               netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+                        i40e_stat_str(hw, status),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
 
@@ -1018,7 +1045,8 @@ static int i40e_set_flags(struct net_device *netdev, u32 data)
 
 #endif
 #ifdef ETHTOOL_GRXRINGS
-       supported_flags |= ETH_FLAG_NTUPLE;
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               supported_flags |= ETH_FLAG_NTUPLE;
 #endif
        rc = ethtool_op_set_flags(netdev, data, supported_flags);
        if (rc)
@@ -1029,7 +1057,7 @@ static int i40e_set_flags(struct net_device *netdev, u32 data)
        need_reset = i40e_set_ntuple(pf, netdev->features);
 #endif /* ETHTOOL_GRXRINGS */
        if (need_reset)
-               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
        return 0;
 }
@@ -1124,9 +1152,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
 
                cmd = (struct i40e_nvm_access *)eeprom;
                ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-               if (ret_val && 
-                   ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) ||
-                    (hw->debug_mask & I40E_DEBUG_NVM)))
+               if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
                        dev_info(&pf->pdev->dev,
                                 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
                                 ret_val, hw->aq.asq_last_status, errno,
@@ -1200,7 +1226,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
                & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
                >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
        /* register returns value in power of 2, 64Kbyte chunks. */
-       val = (64 * 1024) * (1 << val);
+       val = (64 * 1024) * BIT(val);
        return val;
 }
 
@@ -1230,10 +1256,7 @@ static int i40e_set_eeprom(struct net_device *netdev,
 
        cmd = (struct i40e_nvm_access *)eeprom;
        ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-       if (ret_val && 
-           ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM &&
-             hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) ||
-            (hw->debug_mask & I40E_DEBUG_NVM)))
+       if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
                dev_info(&pf->pdev->dev,
                         "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
                         ret_val, hw->aq.asq_last_status, errno,
@@ -1253,7 +1276,7 @@ static void i40e_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, i40e_driver_version_str,
                sizeof(drvinfo->version));
-       strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
+       strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
                sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
                sizeof(drvinfo->bus_info));
@@ -1344,6 +1367,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
                        /* clone ring and setup updated count */
                        tx_rings[i] = *vsi->tx_rings[i];
                        tx_rings[i].count = new_tx_count;
+                       /* the desc and bi pointers will be reallocated in the
+                        * setup call
+                        */
+                       tx_rings[i].desc = NULL;
+                       tx_rings[i].rx_bi = NULL;
                        err = i40e_setup_tx_descriptors(&tx_rings[i]);
                        if (err) {
                                while (i) {
@@ -1374,6 +1402,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
                        /* clone ring and setup updated count */
                        rx_rings[i] = *vsi->rx_rings[i];
                        rx_rings[i].count = new_rx_count;
+                       /* the desc and bi pointers will be reallocated in the
+                        * setup call
+                        */
+                       rx_rings[i].desc = NULL;
+                       rx_rings[i].rx_bi = NULL;
                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
                        if (err) {
                                while (i) {
@@ -1436,8 +1469,9 @@ static int i40e_get_stats_count(struct net_device *netdev)
        struct i40e_pf *pf = vsi->back;
 
        if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
-               if (pf->lan_veb != I40E_NO_VEB)
-                       return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_LEN;
+               if ((pf->lan_veb != I40E_NO_VEB) &&
+                   (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
+                       return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL;
                else
                        return I40E_PF_STATS_LEN(netdev);
        else
@@ -1457,8 +1491,10 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
        case ETH_SS_STATS:
                if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
                        int len = I40E_PF_STATS_LEN(netdev);
-                       if (pf->lan_veb != I40E_NO_VEB)
-                               len += I40E_VEB_STATS_LEN;
+
+                       if ((pf->lan_veb != I40E_NO_VEB) &&
+                           (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
+                               len += I40E_VEB_STATS_TOTAL;
                        return len;
                } else {
                        return I40E_VSI_STATS_LEN(netdev);
@@ -1488,6 +1524,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 #else
        struct net_device_stats *net_stats = i40e_get_vsi_stats_struct(vsi);
 #endif
+
        i40e_update_stats(vsi);
 
        for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
@@ -1541,13 +1578,21 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
        }
        rcu_read_unlock();
        if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
-               if (pf->lan_veb != I40E_NO_VEB) {
+               if ((pf->lan_veb != I40E_NO_VEB) &&
+                   (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
                        struct i40e_veb *veb = pf->veb[pf->lan_veb];
+
                        for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
                                p = (char *)veb + i40e_gstrings_veb_stats[j].stat_offset;
                                data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
                                           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
                        }
+                       for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
+                               data[i++] = veb->tc_stats.tc_tx_packets[j];
+                               data[i++] = veb->tc_stats.tc_tx_bytes[j];
+                               data[i++] = veb->tc_stats.tc_rx_packets[j];
+                               data[i++] = veb->tc_stats.tc_rx_bytes[j];
+                       }
                }
                for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
                        p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
@@ -1612,12 +1657,27 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                }
                if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
-                       if (pf->lan_veb != I40E_NO_VEB) {
+                       if ((pf->lan_veb != I40E_NO_VEB) &&
+                           (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
                                for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
                                        snprintf(p, ETH_GSTRING_LEN, "veb.%s",
                                                 i40e_gstrings_veb_stats[i].stat_string);
                                        p += ETH_GSTRING_LEN;
                                }
+                               for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                                "veb.tc_%u_tx_packets", i);
+                                       p += ETH_GSTRING_LEN;
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                                "veb.tc_%u_tx_bytes", i);
+                                       p += ETH_GSTRING_LEN;
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                                "veb.tc_%u_rx_packets", i);
+                                       p += ETH_GSTRING_LEN;
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                                "veb.tc_%u_rx_bytes", i);
+                                       p += ETH_GSTRING_LEN;
+                               }
                        }
                        for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
                                snprintf(p, ETH_GSTRING_LEN, "port.%s",
@@ -1685,20 +1745,20 @@ static int i40e_get_ts_info(struct net_device *dev,
        else
                info->phc_index = -1;
 
-       info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
-       info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-                          (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
 
        return 0;
 #else /* HAVE_PTP_1588_CLOCK */
@@ -1711,9 +1771,18 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
+       i40e_status status;
+       bool link_up = false;
 
        netif_info(pf, hw, netdev, "link test\n");
-       if (i40e_get_link_status(&pf->hw))
+       status = i40e_get_link_status(&pf->hw, &link_up);
+       if (status != I40E_SUCCESS) {
+               netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
+               *data = 1;
+               return *data;
+       }
+
+       if (link_up)
                *data = 0;
        else
                *data = 1;
@@ -1783,6 +1852,32 @@ static int i40e_diag_test_count(struct net_device *netdev)
 }
 
 #endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+       struct i40e_vf *vfs = pf->vf;
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vfs; i++)
+               if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
+                       return true;
+       return false;
+}
+
+static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
+{
+       struct i40e_vsi **vsi = pf->vsi;
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               if (!vsi[i])
+                       continue;
+               if (vsi[i]->type == I40E_VSI_VMDQ2)
+                       return true;
+       }
+
+       return false;
+}
+
 static void i40e_diag_test(struct net_device *netdev,
                           struct ethtool_test *eth_test, u64 *data)
 {
@@ -1795,16 +1890,32 @@ static void i40e_diag_test(struct net_device *netdev,
                netif_info(pf, drv, netdev, "offline testing starting\n");
 
                set_bit(__I40E_TESTING, &pf->state);
+
+               if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
+                       dev_warn(&pf->pdev->dev,
+                                "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
+                       data[I40E_ETH_TEST_REG]         = 1;
+                       data[I40E_ETH_TEST_EEPROM]      = 1;
+                       data[I40E_ETH_TEST_INTR]        = 1;
+                       data[I40E_ETH_TEST_LOOPBACK]    = 1;
+                       data[I40E_ETH_TEST_LINK]        = 1;
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+                       clear_bit(__I40E_TESTING, &pf->state);
+                       goto skip_ol_tests;
+               }
+
                /* If the device is online then take it offline */
                if (if_running)
                        /* indicate we're in test mode */
                        dev_close(netdev);
                else
-                       i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+                       /* This reset does not affect link - if it is
+                        * changed to a type of reset that does affect
+                        * link then the following link test would have
+                        * to be moved to before the reset
+                        */
+                       i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
-               /* Link test performed before hardware reset
-                * so autoneg doesn't interfere with test result
-                */
                if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1822,7 +1933,7 @@ static void i40e_diag_test(struct net_device *netdev,
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
                clear_bit(__I40E_TESTING, &pf->state);
-               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
                if (if_running)
                        dev_open(netdev);
@@ -1840,6 +1951,8 @@ static void i40e_diag_test(struct net_device *netdev,
                data[I40E_ETH_TEST_LOOPBACK] = 0;
        }
 
+skip_ol_tests:
+
        netif_info(pf, drv, netdev, "testing finished\n");
 }
 
@@ -1853,7 +1966,7 @@ static void i40e_get_wol(struct net_device *netdev,
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
+       if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
                wol->supported = 0;
                wol->wolopts = 0;
        } else {
@@ -1886,7 +1999,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if (((1 << hw->port) & wol_nvm_bits))
+       if (BIT(hw->port) & wol_nvm_bits)
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
@@ -1978,6 +2091,14 @@ static int i40e_get_coalesce(struct net_device *netdev,
 
        ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
        ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+       /* we use the _usecs_high to store/set the interrupt rate limit
+        * that the hardware supports, that almost but not quite
+        * fits the original intent of the ethtool variable,
+        * the rx_coalesce_usecs_high limits total interrupts
+        * per second from both tx/rx sources.
+        */
+       ec->rx_coalesce_usecs_high = vsi->int_rate_limit;
+       ec->tx_coalesce_usecs_high = vsi->int_rate_limit;
 
        return 0;
 }
@@ -1996,6 +2117,17 @@ static int i40e_set_coalesce(struct net_device *netdev,
        if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
                vsi->work_limit = ec->tx_max_coalesced_frames_irq;
 
+       /* tx_coalesce_usecs_high is ignored, use rx-usecs-high to adjust limit */
+       if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) {
+               netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n");
+               return -EINVAL;
+       }
+
+       if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+               netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n");
+               return -EINVAL;
+       }
+
        vector = vsi->base_vector;
        if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
            (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
@@ -2009,6 +2141,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
                return -EINVAL;
        }
 
+       vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
+
        if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
            (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
                vsi->tx_itr_setting = ec->tx_coalesce_usecs;
@@ -2032,11 +2166,14 @@ static int i40e_set_coalesce(struct net_device *netdev,
                vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+
                q_vector = vsi->q_vectors[i];
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
                wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
                q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
                wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
+               wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
                i40e_flush(hw);
        }
 
@@ -2186,9 +2323,90 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
        else
                fsp->ring_cookie = rule->q_index;
 
+       if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
+               struct i40e_vsi *vsi;
+
+               vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
+               if (vsi && vsi->type == I40E_VSI_SRIOV) {
+                       fsp->h_ext.data[1] = htonl(vsi->vf_id);
+                       fsp->m_ext.data[1] = htonl(0x1);
+               }
+       }
+
        return 0;
 }
 
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+#define VXLAN_PORT     8472
+
+/**
+ * i40e_get_vxlan_filter_ethtool - get a vxlan filter by loc
+ * @pf: pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * get vxlan filter by loc.
+ * Returns 0 if success.
+ **/
+static int i40e_get_vxlan_filter_ethtool(struct i40e_pf *pf,
+                                        struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec *fsp =
+                       (struct ethtool_rx_flow_spec *)&cmd->fs;
+       static const u8 mac_broadcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+       struct i40e_cloud_filter *rule, *pfilter = NULL;
+       struct i40e_vsi *dst_vsi;
+       struct hlist_node *node2;
+       __be32 tena;
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->cloud_filter_list, cloud_node) {
+               /* filter found with the id */
+               if (rule->id == fsp->location) {
+                       pfilter = rule;
+                       break;
+               }
+       }
+       if (!pfilter) {
+               dev_info(&pf->pdev->dev, "No cloud filter with loc %d\n",
+                       fsp->location);
+               return -ENOENT;
+       }
+
+       dst_vsi = i40e_find_vsi_from_id(pf, pfilter->vsi_id);
+       if (dst_vsi && dst_vsi->type == I40E_VSI_SRIOV) {
+               fsp->h_ext.data[1] = htonl(dst_vsi->vf_id);
+               fsp->m_ext.data[1] = htonl(0x1);
+       }
+
+       ether_addr_copy(fsp->h_u.ether_spec.h_dest, pfilter->outer_mac);
+       ether_addr_copy(fsp->h_u.ether_spec.h_source, pfilter->inner_mac);
+       fsp->h_u.usr_ip4_spec.ip4dst = pfilter->inner_ip[0];
+       fsp->h_ext.vlan_tci = pfilter->inner_vlan;
+
+       tena = htonl(pfilter->tenant_id);
+       memcpy(&fsp->h_ext.data[0], &tena, sizeof(tena));
+
+       fsp->ring_cookie = pfilter->queue_id;
+       if (pfilter->flags & I40E_CLOUD_FIELD_OMAC)
+               ether_addr_copy(fsp->m_u.ether_spec.h_dest, mac_broadcast);
+       if (pfilter->flags & I40E_CLOUD_FIELD_IMAC)
+               ether_addr_copy(fsp->m_u.ether_spec.h_source, mac_broadcast);
+       if (pfilter->flags & I40E_CLOUD_FIELD_IVLAN)
+               fsp->m_ext.vlan_tci = htons(0x7fff);
+       if (pfilter->flags & I40E_CLOUD_FIELD_TEN_ID)
+               *(__be32 *)&fsp->m_ext.data[0] = htonl(0x1);
+       if (pfilter->flags & I40E_CLOUD_FIELD_IIP) {
+               fsp->flow_type = IP_USER_FLOW;
+               fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+       } else
+               fsp->flow_type = ETHER_FLOW;
+
+       fsp->flow_type |= FLOW_MAC_EXT;
+
+       return 0;
+}
+
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
 /**
  * i40e_get_rxnfc - command to get RX flow classification rules
  * @netdev: network interface device structure
@@ -2223,6 +2441,11 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
                 ret = 0;
                 break;
         case ETHTOOL_GRXCLSRULE:
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+               /* try the cloud/vxlan filter list first; fall back to the
+                * flow-director list only when no cloud filter owns this loc,
+                * otherwise the fdir lookup below would clobber ret
+                */
+               ret = i40e_get_vxlan_filter_ethtool(pf, cmd);
+               if (ret != -ENOENT)
+                       break;
+#endif
                ret = i40e_get_ethtool_fdir_entry(pf, cmd);
                break;
        case ETHTOOL_GRXCLSRLALL:
@@ -2268,10 +2489,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case TCP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                default:
                        return -EINVAL;
@@ -2280,10 +2501,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case TCP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                default:
                        return -EINVAL;
@@ -2292,12 +2513,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                default:
                        return -EINVAL;
@@ -2306,12 +2527,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                default:
                        return -EINVAL;
@@ -2324,7 +2545,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
                break;
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
@@ -2333,15 +2554,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
                break;
        case IPV4_FLOW:
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+                       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
                break;
        case IPV6_FLOW:
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+                       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
                break;
        default:
                return -EINVAL;
@@ -2487,6 +2708,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
        struct i40e_fdir_filter *input;
        struct i40e_pf *pf;
        int ret = -EINVAL;
+       u16 vf_id;
 
        if (!vsi)
                return -EINVAL;
@@ -2536,7 +2758,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
        input->pctype = 0;
        input->dest_vsi = vsi->id;
        input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
-       input->cnt_index  = pf->fd_sb_cnt_idx;
+       input->cnt_index  = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
        input->flow_type = fsp->flow_type;
        input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
 
@@ -2548,7 +2770,22 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
        input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
        input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
 
+       if (ntohl(fsp->m_ext.data[1])) {
+               if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
+                       netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+                       goto free_input;
+               }
+               vf_id = ntohl(fsp->h_ext.data[1]);
+               /* Find vsi id from vf id and override dest vsi */
+               input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
+               if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
+                       netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+                       goto free_input;
+               }
+       }
+
        ret = i40e_add_del_fdir(vsi, input, true);
+free_input:
        if (ret)
                kfree(input);
        else
@@ -2557,6 +2794,280 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
        return ret;
 }
 
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+/**
+ * i40e_vxlan_filter_mask2flags- Convert Vxlan filter details to filter type
+ * @fsp: RX flow classification rules
+ * @flags: Resultant combination of all the fields to decide the tuple
+ *
+ * Returns 0 if a valid filter type was identified.
+ **/
+static inline i40e_status i40e_vxlan_filter_mask2flags(
+                                       struct ethtool_rx_flow_spec *fsp,
+                                       u8 *flags)
+{
+       static const u8 mac_broadcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+       static const u8 mac_zero[] = { 0, 0, 0, 0, 0, 0 };
+       u8 i = 0;       /* accumulator for I40E_CLOUD_FIELD_* bits */
+       u16 vlan_tci = fsp->m_ext.vlan_tci;
+       u32 vxlan_id = 0;
+
+       *flags = 0;
+
+       /* Fix: apply ntohl() to the datum and then compare; the original
+        * "ntohl(fsp->h_ext.data[0] != 0xffffffff)" byte-swapped the boolean
+        * result of the comparison instead.
+        */
+       if (ntohl(fsp->h_ext.data[0]) != 0xffffffff)
+               vxlan_id = ntohl(fsp->m_ext.data[0]);
+
+       switch (fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+       case ETHER_FLOW:
+               /* dest MAC mask must be all-ones (match outer MAC) or all-zero */
+               if (!memcmp(fsp->m_u.ether_spec.h_dest, mac_broadcast,
+                   sizeof(mac_broadcast)))
+                       i |= I40E_CLOUD_FIELD_OMAC;
+               else if (!memcmp(fsp->m_u.ether_spec.h_dest, mac_zero,
+                   sizeof(mac_broadcast)))
+                       i &= ~I40E_CLOUD_FIELD_OMAC;
+               else
+                       return I40E_ERR_CONFIG;
+
+               /* source MAC mask must be all-ones (match inner MAC) or all-zero */
+               if (!memcmp(fsp->m_u.ether_spec.h_source, mac_broadcast,
+                   sizeof(mac_broadcast)))
+                       i |= I40E_CLOUD_FIELD_IMAC;
+               else if (!memcmp(fsp->m_u.ether_spec.h_source, mac_zero,
+                   sizeof(mac_broadcast)))
+                       i &= ~I40E_CLOUD_FIELD_IMAC;
+               else
+                       return I40E_ERR_CONFIG;
+               break;
+
+       case IP_USER_FLOW:
+               /* inner IP dst mask must be fully set or fully clear */
+               if (fsp->m_u.usr_ip4_spec.ip4dst == 0xffffffff)
+                       i |= I40E_CLOUD_FIELD_IIP;
+               else if (fsp->m_u.usr_ip4_spec.ip4dst == 0)
+                       i &= ~I40E_CLOUD_FIELD_IIP;
+               else
+                       return I40E_ERR_CONFIG;
+               break;
+       default:
+               return I40E_ERR_CONFIG;
+       }
+
+       /* NOTE(review): vlan_tci comes from a __be16 field but is masked with
+        * a host-order constant here - confirm the intended byte order.
+        */
+       switch (vlan_tci & 0x7fff) {
+       case 0x7fff:
+               i |= I40E_CLOUD_FIELD_IVLAN;
+               break;
+       case 0:
+               i &= ~I40E_CLOUD_FIELD_IVLAN;
+               break;
+       default:
+               return I40E_ERR_CONFIG;
+       }
+
+       /* VNI is 24 bits: its mask must be all-ones or all-zero */
+       switch (vxlan_id & 0xffffff) {
+       case 0xffffff:
+               i |= I40E_CLOUD_FIELD_TEN_ID;
+               break;
+       case 0:
+               i &= ~I40E_CLOUD_FIELD_TEN_ID;
+               break;
+       default:
+               return I40E_ERR_CONFIG;
+       }
+
+       *flags = i;
+       return I40E_SUCCESS;
+}
+
+/**
+ * i40e_add_vxlan_filter_ethtool - Add vxlan filter
+ * @pf: pointer to the physical function struct
+ * @fsp: RX flow classification rules
+ *
+ * Add vxlan filter for a specific flow spec.
+ * Returns 0 if the filter were successfully added.
+ **/
+static int i40e_add_vxlan_filter_ethtool(struct i40e_pf *pf,
+                                        struct ethtool_rx_flow_spec *fsp)
+{
+       struct i40e_vsi *dst_vsi, *vsi = NULL;
+       struct i40e_cloud_filter *rule, *parent, *pfilter = NULL;
+       struct hlist_node *node2;
+       u16 vf_id, vsi_idx;
+       u8 flags = 0;
+       int ret;
+
+       /* Resolve the destination VSI: user-def data[1], when masked in,
+        * names a VF whose LAN VSI should receive the filtered traffic.
+        */
+       if (ntohl(fsp->m_ext.data[1])) {
+               vf_id = (u16)ntohl(fsp->h_ext.data[1]);
+               /* if VF id >= num_vfs, program a filter for PF Main VSI */
+               if (vf_id >= pf->num_alloc_vfs) {
+                       /* Fix: message previously read "Main VSI %d" while
+                        * the argument is the out-of-range vf_id.
+                        */
+                       dev_info(&pf->pdev->dev,
+                                "Out of range vf_id %d, adding the cloud filter for Main VSI\n",
+                                vf_id);
+                       dst_vsi = pf->vsi[pf->lan_vsi];
+               } else {
+                       vsi_idx = pf->vf[vf_id].lan_vsi_idx;
+                       dst_vsi = pf->vsi[vsi_idx];
+                       if (!dst_vsi) {
+                               dev_info(&pf->pdev->dev,
+                                        "Invalid vf_id %d\n", vf_id);
+                               return -EINVAL;
+                       }
+               }
+       } else {
+               dst_vsi = pf->vsi[pf->lan_vsi];
+       }
+
+       if (fsp->ring_cookie >= dst_vsi->num_queue_pairs) {
+               dev_info(&pf->pdev->dev,
+                        "Invalid queue_id %llu\n", fsp->ring_cookie);
+               return -EINVAL;
+       }
+
+       /* Translate the ethtool masks into a cloud filter tuple type */
+       ret = i40e_vxlan_filter_mask2flags(fsp, &flags);
+       if (ret || !flags) {
+               dev_info(&pf->pdev->dev,
+                        "Invalid mask config, ret = %d, flags = %d\n",
+                        ret, flags);
+               return -EINVAL;
+       }
+
+       /* The list is kept sorted by rule id; find the insertion point */
+       parent = NULL;
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->cloud_filter_list, cloud_node) {
+               /* filter exists with the id */
+               if (rule->id >= fsp->location) {
+                       pfilter = rule;
+                       break;
+               }
+               parent = rule;
+       }
+       /* if filter exists with same id, delete old.
+        * NOTE(review): if programming the replacement below fails, the old
+        * filter has already been removed from HW and is not restored.
+        */
+       if (pfilter && (pfilter->id == fsp->location)) {
+               vsi = i40e_find_vsi_from_id(pf, pfilter->vsi_id);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev, "no vsi with vsi_id %d\n",
+                               pfilter->vsi_id);
+                       return -ENOSYS;
+               }
+               ret = i40e_add_del_cloud_filter(pf, pfilter, vsi, false);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "fail to delete old cloud filter, err = %d\n",
+                                ret);
+                       return -ENOSYS;
+               }
+               hlist_del(&pfilter->cloud_node);
+               kfree(pfilter);
+               pf->num_cloud_filters--;
+       }
+
+       pfilter = kzalloc(sizeof(*pfilter), GFP_KERNEL);
+       if (!pfilter)
+               return -ENOMEM;
+
+       pfilter->id = fsp->location;
+       pfilter->vsi_id = dst_vsi->id;
+       switch (fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+       case ETHER_FLOW:
+               ether_addr_copy(pfilter->outer_mac,
+                       fsp->h_u.ether_spec.h_dest);
+               ether_addr_copy(pfilter->inner_mac,
+                       fsp->h_u.ether_spec.h_source);
+               break;
+       case IP_USER_FLOW:
+               pfilter->inner_ip[0] = fsp->h_u.usr_ip4_spec.ip4dst;
+               break;
+       default:
+               dev_info(&pf->pdev->dev, "unknown flow type\n");
+               kfree(pfilter);
+               return I40E_ERR_CONFIG;
+       }
+
+       pfilter->inner_vlan = fsp->h_ext.vlan_tci;
+
+       /* Fix: compare the converted datum; the original byte-swapped the
+        * result of the comparison ("ntohl(data[0] != 0xffffffff)").
+        */
+       if (ntohl(fsp->h_ext.data[0]) != 0xffffffff)
+               pfilter->tenant_id = ntohl(fsp->h_ext.data[0]);
+       /* else this is a L3 VEB filter for non-tunneled packets or a tuple
+        * without vni.
+        */
+       pfilter->queue_id = fsp->ring_cookie;
+       pfilter->tunnel_type = I40E_CLOUD_TNL_TYPE_XVLAN;
+       pfilter->flags = flags;
+
+       ret = i40e_add_del_cloud_filter(pf, pfilter, dst_vsi, true);
+       if (ret) {
+               kfree(pfilter);
+               dev_info(&pf->pdev->dev,
+                        "fail to add cloud filter, err = %d\n", ret);
+               return -ENOSYS;
+       }
+
+       INIT_HLIST_NODE(&pfilter->cloud_node);
+       /* insert in id order to keep the list sorted */
+       if (parent)
+               hlist_add_behind(&pfilter->cloud_node, &parent->cloud_node);
+       else
+               hlist_add_head(&pfilter->cloud_node,
+                                      &pf->cloud_filter_list);
+       pf->num_cloud_filters++;
+
+       return 0;
+}
+
+/**
+ * i40e_del_vxlan_filter_ethtool - del vxlan filter
+ * @pf: pointer to the physical function struct
+ * @fsp: RX flow classification rules
+ *
+ * Delete vxlan filter for a specific flow spec.
+ * Returns 0 if the filter was successfully deleted.
+ **/
+static int i40e_del_vxlan_filter_ethtool(struct i40e_pf *pf,
+                                        struct ethtool_rx_flow_spec *fsp)
+{
+       struct i40e_cloud_filter *rule, *pfilter = NULL;
+       struct i40e_vsi *vsi = NULL;
+       struct hlist_node *node2;
+       int ret;
+
+       /* locate the tracked filter by its ethtool rule location */
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->cloud_filter_list, cloud_node) {
+               /* filter found with the id */
+               if (rule->id == fsp->location) {
+                       pfilter = rule;
+                       break;
+               }
+       }
+       if (!pfilter) {
+               /* Fix: fsp->location is u32, print with %u instead of %d */
+               dev_info(&pf->pdev->dev, "no cloud filter exists with id %u\n",
+                       fsp->location);
+               return -ENOENT;
+       }
+       vsi = i40e_find_vsi_from_id(pf, pfilter->vsi_id);
+       if (!vsi) {
+               dev_info(&pf->pdev->dev,
+                        "no vsi with vsi_id %d\n", pfilter->vsi_id);
+               return -ENOSYS;
+       }
+
+       /* un-program the filter from HW before dropping our bookkeeping */
+       ret = i40e_add_del_cloud_filter(pf, pfilter, vsi, false);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "failed to delete cloud filter, err = %d\n",
+                        ret);
+               return -ENOSYS;
+       }
+
+       /* remove filter from the list */
+       hlist_del(&pfilter->cloud_node);
+       kfree(pfilter);
+       pf->num_cloud_filters--;
+
+       return 0;
+}
+
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+
 /**
  * i40e_set_rxnfc - command to set RX flow classification rules
  * @netdev: network interface device structure
@@ -2567,6 +3078,9 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+       struct ethtool_rx_flow_spec *fsp;
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        int ret = -EOPNOTSUPP;
@@ -2576,10 +3090,30 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
                ret = i40e_set_rss_hash_opt(pf, cmd);
                break;
        case ETHTOOL_SRXCLSRLINS:
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+               fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+#define I40E_USER_DATA_VXLAN_CLOUD_FILTER 1
+               if (ntohl(fsp->h_ext.data[0]) >=
+                                       I40E_USER_DATA_VXLAN_CLOUD_FILTER)
+                       ret = i40e_add_vxlan_filter_ethtool(pf, fsp);
+               else
+                       ret = i40e_add_fdir_ethtool(vsi, cmd);
+               break;
+#else
                ret = i40e_add_fdir_ethtool(vsi, cmd);
                break;
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
        case ETHTOOL_SRXCLSRLDEL:
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+               fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+               if (ntohl(fsp->h_ext.data[0]) >=
+                                       I40E_USER_DATA_VXLAN_CLOUD_FILTER)
+                       ret = i40e_del_vxlan_filter_ethtool(pf, fsp);
+               else
+                       ret = i40e_del_fdir_entry(vsi, cmd);
+#else
                ret = i40e_del_fdir_entry(vsi, cmd);
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
                break;
        default:
                break;
@@ -2676,7 +3210,6 @@ static int i40e_set_channels(struct net_device *dev,
 
 #endif /* ETHTOOL_SCHANNELS */
 #define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
-#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
 #if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
 /**
  * i40e_get_rxfh_key_size - get the RSS hash key size
@@ -2704,6 +3237,10 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
 }
 
 #if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
+#ifdef HAVE_RXFH_HASHFUNC
+static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+                        u8 *hfunc)
+#else
 /**
  * i40e_get_rxfh - get the rx flow hash indirection table
  * @netdev: network interface device structure
@@ -2713,6 +3250,7 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
  * Reads the indirection table directly from the hardware. Always returns 0.
  **/
 static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+#endif
 #else
 /**
  * i40e_get_rxfh - get the rx flow hash indirection table
@@ -2731,15 +3269,22 @@ static int i40e_get_rxfh_indir(struct net_device *netdev, u32 *indir)
        u32 reg_val;
        int i, j;
 
-       if (indir) {
-               for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
-                       reg_val = rd32(hw, I40E_PFQF_HLUT(i));
-                       indir[j++] = reg_val & 0xff;
-                       indir[j++] = (reg_val >> 8) & 0xff;
-                       indir[j++] = (reg_val >> 16) & 0xff;
-                       indir[j++] = (reg_val >> 24) & 0xff;
-               }
+#ifdef HAVE_RXFH_HASHFUNC
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+#endif
+       if (!indir)
+               return 0;
+
+       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               reg_val = rd32(hw, I40E_PFQF_HLUT(i));
+               indir[j++] = reg_val & 0xff;
+               indir[j++] = (reg_val >> 8) & 0xff;
+               indir[j++] = (reg_val >> 16) & 0xff;
+               indir[j++] = (reg_val >> 24) & 0xff;
        }
+
 #if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
        if (key) {
                for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
@@ -2785,6 +3330,7 @@ static int i40e_get_rxfh_indir(struct net_device *netdev,
        indir->size = ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4);
        return 0;
 }
+
 #endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
 #endif /* ETHTOOL_GRXFHINDIR */
 #ifdef ETHTOOL_SRXFHINDIR
@@ -2796,18 +3342,23 @@ static int i40e_get_rxfh_indir(struct net_device *netdev,
  * @indir: indirection table
  * @key: hash key
  *
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
  * returns 0 after programming the table.
  **/
+#ifdef HAVE_RXFH_HASHFUNC
+static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
+                        const u8 *key, const u8 hfunc)
+#else
 static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
                         const u8 *key)
+#endif
 #else
 /**
  * i40e_set_rxfh_indir - set the rx flow hash indirection table
  * @netdev: network interface device structure
  * @indir: indirection table
  *
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
  * returns 0 after programming the table.
  **/
 static int i40e_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
@@ -2820,20 +3371,25 @@ static int i40e_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
        u32 reg_val;
        int i, j;
 
-       if (indir) {
-               /* Verify user input. */
-               for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) {
-                       if (indir[i] >= pf->rss_size)
-                               return -EINVAL;
-               }
+#ifdef HAVE_RXFH_HASHFUNC
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+#endif
+       if (!indir)
+               return 0;
 
-               for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
-                       reg_val = indir[j++];
-                       reg_val |= indir[j++] << 8;
-                       reg_val |= indir[j++] << 16;
-                       reg_val |= indir[j++] << 24;
-                       wr32(hw, I40E_PFQF_HLUT(i), reg_val);
-               }
+       /* Verify user input. */
+       for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) {
+               if (indir[i] >= pf->rss_size)
+                       return -EINVAL;
+       }
+
+       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               reg_val = indir[j++];
+               reg_val |= indir[j++] << 8;
+               reg_val |= indir[j++] << 16;
+               reg_val |= indir[j++] << 24;
+               wr32(hw, I40E_PFQF_HLUT(i), reg_val);
        }
 #if defined(ETHTOOL_SRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
        if (key) {
@@ -2854,7 +3410,7 @@ static int i40e_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
  * @netdev: network interface device structure
  * @indir: indirection table
  *
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
  * returns 0 after programming the table.
  **/
 static int i40e_set_rxfh_indir(struct net_device *netdev,
@@ -2907,11 +3463,52 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
        struct i40e_pf *pf = vsi->back;
        u32 ret_flags = 0;
 
-       ret_flags |= pf->hw.func_caps.npar_enable ?
-               I40E_PRIV_FLAGS_NPAR_FLAG : 0;
+       ret_flags |= pf->flags & I40E_FLAG_MFP_ENABLED ?
+               I40E_PRIV_FLAGS_MFP_FLAG : 0;
+       ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
+               I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
+       ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
+               I40E_PRIV_FLAGS_FD_ATR : 0;
+       ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
+               I40E_PRIV_FLAGS_VEB_STATS : 0;
 
        return ret_flags;
 }
+
+/**
+ * i40e_set_priv_flags - set private flags
+ * @dev: network interface device structure
+ * @flags: bit flags to be set
+ *
+ * Updates pf->flags for the LINKPOLL, FD_ATR and VEB_STATS private
+ * flags.  The MFP flag reported by i40e_get_priv_flags() is not
+ * handled here and therefore cannot be changed via this path.
+ * Always returns 0.
+ **/
+static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+
+       if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
+               pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
+       else
+               pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
+
+       /* allow the user to control the state of the Flow
+        * Director ATR (Application Targeted Routing) feature
+        * of the driver
+        */
+       if (flags & I40E_PRIV_FLAGS_FD_ATR) {
+               pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+       } else {
+               pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+               /* NOTE(review): ATR is also recorded in auto_disable_flags,
+                * presumably so the driver does not re-enable it on its own -
+                * confirm against the auto-disable handling in i40e_main.c.
+                */
+               pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+       }
+
+       /* toggle per-VEB statistics collection */
+       if (flags & I40E_PRIV_FLAGS_VEB_STATS)
+               pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
+       else
+               pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
+       return 0;
+}
 #endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
 
 static const struct ethtool_ops i40e_ethtool_ops = {
@@ -2973,6 +3570,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
 #else /* HAVE_ETHTOOL_GET_SSET_COUNT */
        .get_sset_count         = i40e_get_sset_count,
        .get_priv_flags         = i40e_get_priv_flags,
+       .set_priv_flags         = i40e_set_priv_flags,
 #endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
        .get_ethtool_stats      = i40e_get_ethtool_stats,
 #ifdef HAVE_ETHTOOL_GET_PERM_ADDR
similarity index 97%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_fcoe.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_fcoe.c
index bcceeb7ab0cd0e528ca94b430166463cda8e4f6c..61dcba6271b87b54f7154fb21e811b8f9de09d6e 100644 (file)
@@ -126,7 +126,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
  *
  * The FC EOF is converted to the value understood by HW for descriptor
  * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first and that already checks for all supported valid eof values.
  **/
 static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
 {
@@ -140,9 +140,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
        case FC_EOF_A:
                return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
        default:
-               /* FIXME: still returns 0 */
-               pr_err("Unrecognized EOF %x\n", eof);
-               return 0;
+               /* Supported valid eof shall be already checked by
+                * calling i40e_fcoe_eof_is_supported() first,
+                * therefore this default case shall never hit.
+                */
+               WARN_ON(1);
+               return -EINVAL;
        }
 }
 
@@ -277,10 +280,8 @@ out:
 /**
  * i40e_fcoe_sw_init - sets up the HW for FCoE
  * @pf: pointer to PF
- *
- * Returns 0 if FCoE is supported otherwise the error code
  **/
-int i40e_init_pf_fcoe(struct i40e_pf *pf)
+void i40e_init_pf_fcoe(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        u32 val;
@@ -291,20 +292,20 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
        pf->fcoe_hmc_filt_num = 0;
 
        if (!pf->hw.func_caps.fcoe) {
-               dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
-               return 0;
+               dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n");
+               return;
        }
 
        if (!pf->hw.func_caps.dcb) {
                dev_warn(&pf->pdev->dev,
                         "Hardware is not DCB capable not enabling FCoE.\n");
-               return 0;
+               return;
        }
 
        /* enable FCoE hash filter */
        val = rd32(hw, I40E_PFQF_HENA(1));
-       val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
-       val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
+       val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
+       val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
        val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
        wr32(hw, I40E_PFQF_HENA(1), val);
 
@@ -313,10 +314,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
        pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
 
        /* Reserve 4K DDP contexts and 20K filter size for FCoE */
-       pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
-                                I40E_DMA_CNTX_BASE_SIZE;
+       pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
+                               I40E_DMA_CNTX_BASE_SIZE;
        pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
-                               (1 << I40E_HASH_FILTER_SIZE_16K) *
+                               BIT(I40E_HASH_FILTER_SIZE_16K) *
                                I40E_HASH_FILTER_BASE_SIZE;
 
        /* FCoE object: max 16K filter buckets and 4K DMA contexts */
@@ -331,7 +332,6 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
        wr32(hw, I40E_GLFCOE_RCTL, val);
 
        dev_info(&pf->pdev->dev, "FCoE is supported.\n");
-       return 0;
 }
 
 #ifdef CONFIG_DCB
@@ -354,7 +354,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
                if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
                    app.protocolid == ETH_P_FCOE) {
                        tc = dcbcfg->etscfg.prioritytable[app.priority];
-                       enabled_tc |= (1 << tc);
+                       enabled_tc |= BIT(tc);
                        break;
                }
        }
@@ -1323,8 +1323,7 @@ static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
        /* MACLEN is ether header length in words not bytes */
        td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
-       return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-                          td_cmd, td_offset);
+       i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset);
 }
 
 /**
@@ -1342,6 +1341,7 @@ static inline int i40e_fcoe_set_skb_header(struct sk_buff *skb)
        skb->mac_len = sizeof(struct ethhdr);
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veth = (struct vlan_ethhdr *) eth_hdr(skb);
+
                protocol = veth->h_vlan_encapsulated_proto;
                skb->mac_len += sizeof(struct vlan_hdr);
        }
@@ -1449,8 +1449,12 @@ static int i40e_fcoe_change_mtu(struct net_device *netdev, int new_mtu)
  * @features: the feature set that the stack is suggesting
  *
  **/
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+static int i40e_fcoe_set_features(struct net_device *netdev, u32 features)
+#else
 static int i40e_fcoe_set_features(struct net_device *netdev,
                             netdev_features_t features)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -1504,6 +1508,13 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = {
 #ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
        .ndo_fcoe_ddp_target    = i40e_fcoe_ddp_target,
 #endif
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+};
+
+/* RHEL6 keeps these operations in a separate structure */
+static const struct net_device_ops_ext i40e_fcoe_netdev_ops_ext = {
+       .size = sizeof(struct net_device_ops_ext),
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
 #ifdef HAVE_NDO_SET_FEATURES
        .ndo_set_features       = i40e_fcoe_set_features,
 #endif /* HAVE_NDO_SET_FEATURES */
@@ -1526,6 +1537,9 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
 {
        struct i40e_hw *hw = &vsi->back->hw;
        struct i40e_pf *pf = vsi->back;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+       u32 hw_features;
+#endif
 
        if (vsi->type != I40E_VSI_FCOE)
                return;
@@ -1552,8 +1566,14 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
        netdev->features |= NETIF_F_ALL_FCOE;
        netdev->vlan_features |= NETIF_F_ALL_FCOE;
 #ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+       hw_features = get_netdev_hw_features(netdev);
+       hw_features |= netdev->features;
+       set_netdev_hw_features(netdev, hw_features);
+#else
        netdev->hw_features |= netdev->features;
 #endif
+#endif
 #ifdef IFF_UNICAST_FLT
        netdev->priv_flags |= IFF_UNICAST_FLT;
 #endif
@@ -1573,10 +1593,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
         */
        netdev->dev_port = 1;
 #endif
+       spin_lock_bh(&vsi->mac_filter_list_lock);
        i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
        i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
        i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
        i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* use san mac */
        ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
similarity index 97%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_fcoe.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_fcoe.h
index 25c9d9143840da90a63987d0b373d71b81382601..88a2bac1b1ff581fe00f678352a5be93553cc82a 100644 (file)
@@ -59,9 +59,9 @@
        (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
 
 #define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT   \
-       (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+       BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
 #define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT   \
-       (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+       BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
 
 #define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e)    \
        I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_helper.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_helper.h
new file mode 100644 (file)
index 0000000..003291a
--- /dev/null
@@ -0,0 +1,138 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HELPER_H_
+#define _I40E_HELPER_H_
+
+/**
+ * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ *
+ * Rounds @size up to @alignment and backs @mem with a zeroed
+ * DMA-coherent buffer from the PF's PCI device.
+ * Returns 0 on success or -ENOMEM on allocation failure.
+ **/
+inline int i40e_allocate_dma_mem_d(struct i40e_hw *hw,
+                                  struct i40e_dma_mem *mem,
+                                  u64 size, u32 alignment)
+{
+       struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+       mem->size = ALIGN(size, alignment);
+       mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
+                                     &mem->pa, GFP_KERNEL);
+       return mem->va ? 0 : -ENOMEM;
+}
+
+/**
+ * i40e_free_dma_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ *
+ * Releases the coherent buffer and resets the descriptor fields so a
+ * stale entry cannot be freed twice.  Always returns 0.
+ **/
+inline int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+       struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+       dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
+       mem->va = NULL;
+       mem->pa = 0;
+       mem->size = 0;
+
+       return 0;
+}
+
+/**
+ * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ *
+ * Backs @mem with a zeroed kmalloc buffer (GFP_KERNEL, may sleep).
+ * Returns 0 on success or -ENOMEM on allocation failure.
+ **/
+inline int i40e_allocate_virt_mem_d(struct i40e_hw *hw,
+                                   struct i40e_virt_mem *mem,
+                                   u32 size)
+{
+       mem->size = size;
+       mem->va = kzalloc(size, GFP_KERNEL);
+       return mem->va ? 0 : -ENOMEM;
+}
+
+/**
+ * i40e_free_virt_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ *
+ * Frees the backing buffer and resets the descriptor so it can be
+ * reused safely.  Always returns 0.
+ **/
+inline int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+{
+       /* it's ok to kfree a NULL pointer */
+       kfree(mem->va);
+       mem->va = NULL;
+       mem->size = 0;
+
+       return 0;
+}
+
+/**
+ * i40e_init_spinlock_d - OS specific spinlock init for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ *
+ * NOTE: despite the name, these "spinlocks" are implemented on top of
+ * a struct mutex, so acquire/release may sleep and must not be used
+ * from atomic context.  The cast assumes struct i40e_spinlock wraps a
+ * struct mutex - confirm against its definition (i40e_osdep.h).
+ **/
+inline void i40e_init_spinlock_d(struct i40e_spinlock *sp)
+{
+       mutex_init((struct mutex *)sp);
+}
+
+/**
+ * i40e_acquire_spinlock_d - OS specific spinlock acquire for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ *
+ * Takes the underlying mutex; may sleep.
+ **/
+inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
+{
+       mutex_lock((struct mutex *)sp);
+}
+
+/**
+ * i40e_release_spinlock_d - OS specific spinlock release for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ *
+ * Drops the underlying mutex; caller must hold it.
+ **/
+inline void i40e_release_spinlock_d(struct i40e_spinlock *sp)
+{
+       mutex_unlock((struct mutex *)sp);
+}
+
+/**
+ * i40e_destroy_spinlock_d - OS specific spinlock destroy for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ *
+ * Tears down the underlying mutex; the lock must not be held.
+ **/
+inline void i40e_destroy_spinlock_d(struct i40e_spinlock *sp)
+{
+       mutex_destroy((struct mutex *)sp);
+}
+#endif /* _I40E_HELPER_H_ */
similarity index 91%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_hmc.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_hmc.c
index 8b41fad2f2ee23212eb742d3c09ccd06b8ff303d..948949c55b17a35cc09d575cb8b5c5b88352affd 100644 (file)
@@ -122,6 +122,7 @@ exit:
  * @hw: pointer to our HW structure
  * @hmc_info: pointer to the HMC configuration information structure
  * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
  *
  * This function:
  *     1. Initializes the pd entry
@@ -135,12 +136,14 @@ exit:
  **/
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
-                                             u32 pd_index)
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg)
 {
        i40e_status ret_code = I40E_SUCCESS;
        struct i40e_hmc_pd_table *pd_table;
        struct i40e_hmc_pd_entry *pd_entry;
        struct i40e_dma_mem mem;
+       struct i40e_dma_mem *page = &mem;
        u32 sd_idx, rel_pd_idx;
        u64 *pd_addr;
        u64 page_desc;
@@ -161,19 +164,25 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
        pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
        pd_entry = &pd_table->pd_entry[rel_pd_idx];
        if (!pd_entry->valid) {
-               /* allocate a 4K backing page */
-               ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
-                                                I40E_HMC_PAGED_BP_SIZE,
-                                                I40E_HMC_PD_BP_BUF_ALIGNMENT);
-               if (ret_code)
-                       goto exit;
+               if (rsrc_pg) {
+                       pd_entry->rsrc_pg = true;
+                       page = rsrc_pg;
+               } else {
+                       /* allocate a 4K backing page */
+                       ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+                                               I40E_HMC_PAGED_BP_SIZE,
+                                               I40E_HMC_PD_BP_BUF_ALIGNMENT);
+                       if (ret_code)
+                               goto exit;
+                       pd_entry->rsrc_pg = false;
+               }
 
-               i40e_memcpy(&pd_entry->bp.addr, &mem,
+               i40e_memcpy(&pd_entry->bp.addr, page,
                            sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
                pd_entry->bp.sd_pd_index = pd_index;
                pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
                /* Set page address and valid bit */
-               page_desc = mem.pa | 0x1;
+               page_desc = page->pa | 0x1;
 
                pd_addr = (u64 *)pd_table->pd_page_addr.va;
                pd_addr += rel_pd_idx;
@@ -248,7 +257,8 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
        I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
 
        /* free memory here */
-       ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+       if (!pd_entry->rsrc_pg)
+               ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
        if (I40E_SUCCESS != ret_code)
                goto exit;
        if (!pd_table->ref_cnt)
@@ -295,21 +305,15 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
                                            u32 idx, bool is_pf)
 {
        struct i40e_hmc_sd_entry *sd_entry;
-       i40e_status ret_code = I40E_SUCCESS;
+
+       if (!is_pf)
+               return I40E_NOT_SUPPORTED;
 
        /* get the entry and decrease its ref counter */
        sd_entry = &hmc_info->sd_table.sd_entry[idx];
-       if (is_pf) {
-               I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
-       } else {
-               ret_code = I40E_NOT_SUPPORTED;
-               goto exit;
-       }
-       ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
-       if (I40E_SUCCESS != ret_code)
-               goto exit;
-exit:
-       return ret_code;
+       I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+       return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
 }
 
 /**
@@ -349,20 +353,13 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
                                              u32 idx, bool is_pf)
 {
-       i40e_status ret_code = I40E_SUCCESS;
        struct i40e_hmc_sd_entry *sd_entry;
 
+       if (!is_pf)
+               return I40E_NOT_SUPPORTED;
+
        sd_entry = &hmc_info->sd_table.sd_entry[idx];
-       if (is_pf) {
-               I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
-       } else {
-               ret_code = I40E_NOT_SUPPORTED;
-               goto exit;
-       }
-       /* free memory here */
-       ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
-       if (I40E_SUCCESS != ret_code)
-               goto exit;
-exit:
-       return ret_code;
+       I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+       return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
 }
similarity index 96%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_hmc.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_hmc.h
index 1b11ff3a40fd6af7bdfffa7d42170c02bc802098..ccfc19ce400ceaf3df3929a83451e1db06f886b2 100644 (file)
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
 struct i40e_hmc_pd_entry {
        struct i40e_hmc_bp bp;
        u32 sd_index;
+       bool rsrc_pg;
        bool valid;
 };
 
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
                 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |              \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |                  \
-               (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);            \
-       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
+               BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);              \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);                        \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
                I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |               \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);                   \
-       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);                           \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
-                                             u32 pd_index);
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
                                        struct i40e_hmc_info *hmc_info,
                                        u32 idx);
similarity index 98%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_lan_hmc.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_lan_hmc.c
index 141fc6baeb3fa2d5d3c5c0da6c78c1f1ae79b998..96ca9e53a9a8dc22d3a91fbf66ba94c0fa26d231 100644 (file)
@@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
        obj->cnt = txq_num;
        obj->base = 0;
        size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (txq_num > obj->max_cnt) {
@@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
        obj->base = i40e_align_l2obj_base(obj->base);
        size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (rxq_num > obj->max_cnt) {
@@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
        obj->base = i40e_align_l2obj_base(obj->base);
        size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (fcoe_cntx_num > obj->max_cnt) {
@@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
        obj->base = i40e_align_l2obj_base(obj->base);
        size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (fcoe_filt_num > obj->max_cnt) {
@@ -387,7 +387,7 @@ i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
                                /* update the pd table entry */
                                ret_code = i40e_add_pd_table_entry(hw,
                                                                info->hmc_info,
-                                                               i);
+                                                               i, NULL);
                                if (I40E_SUCCESS != ret_code) {
                                        pd_error = true;
                                        break;
@@ -431,9 +431,8 @@ exit_sd_error:
                        pd_idx1 = max(pd_idx,
                                      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
                        pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
-                       for (i = pd_idx1; i < pd_lmt1; i++) {
+                       for (i = pd_idx1; i < pd_lmt1; i++)
                                i40e_remove_pd_bp(hw, info->hmc_info, i);
-                       }
                        i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
                        break;
                case I40E_SD_TYPE_DIRECT:
@@ -763,7 +762,7 @@ static void i40e_write_byte(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = ((u8)1 << ce_info->width) - 1;
+       mask = BIT(ce_info->width) - 1;
 
        src_byte = *from;
        src_byte &= mask;
@@ -804,7 +803,7 @@ static void i40e_write_word(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = ((u16)1 << ce_info->width) - 1;
+       mask = BIT(ce_info->width) - 1;
 
        /* don't swizzle the bits until after the mask because the mask bits
         * will be in a different bit position on big endian machines
@@ -854,7 +853,7 @@ static void i40e_write_dword(u8 *hmc_bits,
         * to 5 bits so the shift will do nothing
         */
        if (ce_info->width < 32)
-               mask = ((u32)1 << ce_info->width) - 1;
+               mask = BIT(ce_info->width) - 1;
        else
                mask = ~(u32)0;
 
@@ -906,7 +905,7 @@ static void i40e_write_qword(u8 *hmc_bits,
         * to 6 bits so the shift will do nothing
         */
        if (ce_info->width < 64)
-               mask = ((u64)1 << ce_info->width) - 1;
+               mask = BIT_ULL(ce_info->width) - 1;
        else
                mask = ~(u64)0;
 
@@ -948,7 +947,7 @@ static void i40e_read_byte(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = ((u8)1 << ce_info->width) - 1;
+       mask = BIT(ce_info->width) - 1;
 
        /* shift to correct alignment */
        mask <<= shift_width;
@@ -986,7 +985,7 @@ static void i40e_read_word(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = ((u16)1 << ce_info->width) - 1;
+       mask = BIT(ce_info->width) - 1;
 
        /* shift to correct alignment */
        mask <<= shift_width;
@@ -1036,7 +1035,7 @@ static void i40e_read_dword(u8 *hmc_bits,
         * to 5 bits so the shift will do nothing
         */
        if (ce_info->width < 32)
-               mask = ((u32)1 << ce_info->width) - 1;
+               mask = BIT(ce_info->width) - 1;
        else
                mask = ~(u32)0;
 
@@ -1089,7 +1088,7 @@ static void i40e_read_qword(u8 *hmc_bits,
         * to 6 bits so the shift will do nothing
         */
        if (ce_info->width < 64)
-               mask = ((u64)1 << ce_info->width) - 1;
+               mask = BIT_ULL(ce_info->width) - 1;
        else
                mask = ~(u64)0;
 
similarity index 85%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_main.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_main.c
index f989dd599813133d126c576438ccb119de7ba6a6..fc9b3dbb6e66f28bb58312454c67ae97533d97f4 100644 (file)
@@ -26,6 +26,7 @@
 
 /* Local includes */
 #include "i40e.h"
+#include "i40e_helper.h"
 #include "i40e_diag.h"
 #ifdef HAVE_VXLAN_RX_OFFLOAD
 #ifdef HAVE_VXLAN_CHECKS
@@ -43,6 +44,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_HW_PERF
 #define DRV_FPGA
+#define DRV_X722
 #define DRV_A0
 #ifdef I40E_MSI_INTERRUPT
 #define DRV_KERN "-msi"
@@ -55,11 +57,11 @@ static const char i40e_driver_string[] =
 #endif
 
 #define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 48
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 47
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
-            __stringify(DRV_VERSION_BUILD) DRV_HW_PERF DRV_FPGA DRV_A0 DRV_KERN
+            __stringify(DRV_VERSION_BUILD) DRV_HW_PERF DRV_FPGA DRV_X722 DRV_A0 DRV_KERN
 const char i40e_driver_version_str[] = DRV_VERSION;
 static const char i40e_copyright[] = "Copyright (c) 2013 - 2015 Intel Corporation.";
 
@@ -81,7 +83,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  *   Class, Class Mask, private data (not used) }
  */
-static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
+static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
@@ -91,7 +93,9 @@ static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
        /* required last entry */
        {0, }
 };
@@ -100,7 +104,7 @@ MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
 #define I40E_MAX_VF_COUNT 128
 #define OPTION_UNSET    -1
 #define I40E_PARAM_INIT { [0 ... I40E_MAX_NIC] = OPTION_UNSET}
-#define I40E_MAX_NIC 32
+#define I40E_MAX_NIC 64
 #if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
 #ifdef CONFIG_PCI_IOV
 static int max_vfs[I40E_MAX_NIC+1] = I40E_PARAM_INIT;
@@ -121,134 +125,7 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
-/**
- * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to fill out
- * @size: size of memory requested
- * @alignment: what to align the allocation to
- **/
-int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
-                           u64 size, u32 alignment)
-{
-       struct i40e_pf *pf = (struct i40e_pf *)hw->back;
-
-       /* TODO: fix this later, by making sure no callers mess up */
-       WARN_ON(!mem);
-       if (!mem)
-               return I40E_ERR_PARAM;
-
-       mem->size = ALIGN(size, alignment);
-       mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
-                                     &mem->pa, GFP_KERNEL);
-       if (!mem->va)
-               return -ENOMEM;
-
-       return 0;
-}
-
-/**
- * i40e_free_dma_mem_d - OS specific memory free for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to free
- **/
-int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
-{
-       struct i40e_pf *pf = (struct i40e_pf *)hw->back;
-
-       /* TODO: fix this later, by making sure no callers mess up */
-       WARN_ON(!mem || !mem->va);
-       if (!mem || !mem->va)
-               return I40E_ERR_PARAM;
-
-       dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
-       mem->va = NULL;
-       mem->pa = 0;
-       mem->size = 0;
-
-       return 0;
-}
-
-/**
- * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to fill out
- * @size: size of memory requested
- **/
-int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
-                            u32 size)
-{
-       /* TODO: fix this later, by making sure no callers mess up */
-       WARN_ON(!mem);
-       if (!mem)
-               return I40E_ERR_PARAM;
-
-       mem->size = size;
-       mem->va = kzalloc(size, GFP_KERNEL);
-
-       if (!mem->va)
-               return -ENOMEM;
-
-       return 0;
-}
-
-/**
- * i40e_free_virt_mem_d - OS specific memory free for shared code
- * @hw:   pointer to the HW structure
- * @mem:  ptr to mem struct to free
- **/
-int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
-{
-       /* TODO: fix this later, by making sure no callers mess up */
-       WARN_ON(!mem);
-       if (!mem)
-               return I40E_ERR_PARAM;
-
-       /* it's ok to kfree a NULL pointer */
-       kfree(mem->va);
-       mem->va = NULL;
-       mem->size = 0;
-
-       return 0;
-}
-
-/* these functions are replaced in build.mk sed expressions */
-
-/**
- * i40e_init_spinlock_d - OS specific spinlock init for shared code
- * @sp: pointer to a spinlock declared in driver space
- **/
-void i40e_init_spinlock_d(struct i40e_spinlock *sp)
-{
-       mutex_init((struct mutex *)sp);
-}
-
-/**
- * i40e_acquire_spinlock_d - OS specific spinlock acquire for shared code
- * @sp: pointer to a spinlock declared in driver space
- **/
-void i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
-{
-       mutex_lock((struct mutex *)sp);
-}
-
-/**
- * i40e_release_spinlock_d - OS specific spinlock release for shared code
- * @sp: pointer to a spinlock declared in driver space
- **/
-void i40e_release_spinlock_d(struct i40e_spinlock *sp)
-{
-       mutex_unlock((struct mutex *)sp);
-}
-
-/**
- * i40e_destroy_spinlock_d - OS specific spinlock destroy for shared code
- * @sp: pointer to a spinlock declared in driver space
- **/
-void i40e_destroy_spinlock_d(struct i40e_spinlock *sp)
-{
-       mutex_destroy((struct mutex *)sp);
-}
+static struct workqueue_struct *i40e_wq;
 
 /**
  * i40e_get_lump - find a lump of free generic resource
@@ -298,10 +175,10 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
                        ret = i;
                        pile->search_hint = i + j;
                        break;
-               } else {
-                       /* not enough, so skip over it and continue looking */
-                       i += j;
                }
+
+               /* not enough, so skip over it and continue looking */
+               i += j;
        }
 
        return ret;
@@ -337,6 +214,22 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
        return count;
 }
 
+/**
+ * i40e_find_vsi_from_id - searches for the vsi with the given id
+ * @pf - the pf structure to search for the vsi
+ * @id - id of the vsi it is searching for
+ **/
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
+{
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vsi; i++)
+               if (pf->vsi[i] && (pf->vsi[i]->id == id))
+                       return pf->vsi[i];
+
+       return NULL;
+}
+
 /**
  * i40e_service_event_schedule - Schedule the service task to wake up
  * @pf: board private structure
@@ -348,7 +241,7 @@ static void i40e_service_event_schedule(struct i40e_pf *pf)
        if (!test_bit(__I40E_DOWN, &pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
            !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
-               schedule_work(&pf->service_task);
+               queue_work(i40e_wq, &pf->service_task);
 }
 
 /**
@@ -368,8 +261,41 @@ static void i40e_tx_timeout(struct net_device *netdev)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
+       struct i40e_ring *tx_ring = NULL;
+       unsigned int i, hung_queue = 0;
+       u32 head, val;
 
        pf->tx_timeout_count++;
+
+       /* find the stopped queue the same way the stack does */
+       for (i = 0; i < netdev->num_tx_queues; i++) {
+               struct netdev_queue *q;
+               unsigned long trans_start;
+
+               q = netdev_get_tx_queue(netdev, i);
+               trans_start = q->trans_start ? : netdev->trans_start;
+               if (netif_xmit_stopped(q) && time_after(jiffies,
+                       (trans_start + netdev->watchdog_timeo))) {
+                       hung_queue = i;
+                       break;
+               }
+       }
+
+       if (i == netdev->num_tx_queues) {
+               netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
+       } else {
+               /* now that we have an index, find the tx_ring struct */
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+                               if (hung_queue ==
+                                               vsi->tx_rings[i]->queue_index) {
+                                       tx_ring = vsi->tx_rings[i];
+                                       break;
+                               }
+                       }
+               }
+       }
+
 #ifdef CONFIG_DEBUG_FS
        if (vsi->block_tx_timeout) {
                netdev_info(netdev, "tx_timeout recovery disabled\n");
@@ -378,21 +304,32 @@ static void i40e_tx_timeout(struct net_device *netdev)
 #endif
 
        if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
-               pf->tx_timeout_recovery_level = 1;
+               pf->tx_timeout_recovery_level = 1;  /* reset after some time */
+       else if (time_before(jiffies,
+                      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
+               return;   /* don't do any new action before the next timeout */
+
+       if (tx_ring) {
+               head = i40e_get_head(tx_ring);
+               /* Read interrupt register */
+               if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+                       val = rd32(&pf->hw,
+                            I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+                                       tx_ring->vsi->base_vector - 1));
+               else
+                       val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+               netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+                               vsi->seid, hung_queue, tx_ring->next_to_clean,
+                               head, tx_ring->next_to_use,
+                               readl(tx_ring->tail), val);
+       }
+
        pf->tx_timeout_last_recovery = jiffies;
-       netdev_info(netdev, "tx_timeout recovery level %d\n",
-                   pf->tx_timeout_recovery_level);
+       netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
+                   pf->tx_timeout_recovery_level, hung_queue);
 
        switch (pf->tx_timeout_recovery_level) {
-       case 0:
-               /* disable and re-enable queues for the VSI */
-               if (in_interrupt()) {
-                       set_bit(__I40E_REINIT_REQUESTED, &pf->state);
-                       set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
-               } else {
-                       i40e_vsi_reinit_locked(vsi);
-               }
-               break;
        case 1:
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                break;
@@ -404,8 +341,6 @@ static void i40e_tx_timeout(struct net_device *netdev)
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
-               set_bit(__I40E_DOWN_REQUESTED, &pf->state);
-               set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                break;
        }
 
@@ -519,6 +454,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
        stats->tx_errors        = vsi_stats->tx_errors;
        stats->tx_dropped       = vsi_stats->tx_dropped;
        stats->rx_errors        = vsi_stats->rx_errors;
+       stats->rx_dropped       = vsi_stats->rx_dropped;
        stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
        stats->rx_length_errors = vsi_stats->rx_length_errors;
 
@@ -562,11 +498,11 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
        memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
        if (vsi->rx_rings && vsi->rx_rings[0]) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       memset(&vsi->rx_rings[i]->stats, 0 ,
+                       memset(&vsi->rx_rings[i]->stats, 0,
                               sizeof(vsi->rx_rings[i]->stats));
-                       memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+                       memset(&vsi->rx_rings[i]->rx_stats, 0,
                               sizeof(vsi->rx_rings[i]->rx_stats));
-                       memset(&vsi->tx_rings[i]->stats, 0 ,
+                       memset(&vsi->tx_rings[i]->stats, 0,
                               sizeof(vsi->tx_rings[i]->stats));
                        memset(&vsi->tx_rings[i]->tx_stats, 0,
                               sizeof(vsi->tx_rings[i]->tx_stats));
@@ -596,6 +532,21 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
                        pf->veb[i]->stat_offsets_loaded = false;
                }
        }
+#ifdef I40E_ADD_PROBES
+       pf->tcp_segs = 0;
+       pf->tx_tcp_cso = 0;
+       pf->tx_udp_cso = 0;
+       pf->tx_sctp_cso = 0;
+       pf->tx_ip4_cso = 0;
+       pf->rx_tcp_cso = 0;
+       pf->rx_udp_cso = 0;
+       pf->rx_sctp_cso = 0;
+       pf->rx_ip4_cso = 0;
+       pf->rx_tcp_cso_err = 0;
+       pf->rx_udp_cso_err = 0;
+       pf->rx_sctp_cso_err = 0;
+       pf->rx_ip4_cso_err = 0;
+#endif
 }
 
 /**
@@ -617,6 +568,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
                               bool offset_loaded, u64 *offset, u64 *stat)
 {
        u64 new_data;
+
        if (hw->device_id == I40E_DEV_ID_QEMU) {
                new_data = rd32(hw, loreg);
                new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
@@ -628,7 +580,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
        if (likely(new_data >= *offset))
                *stat = new_data - *offset;
        else
-               *stat = (new_data + ((u64)1 << 48)) - *offset;
+               *stat = (new_data + BIT_ULL(48)) - *offset;
        *stat &= 0xFFFFFFFFFFFFULL;
 }
 
@@ -651,7 +603,7 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
        if (likely(new_data >= *offset))
                *stat = (u32)(new_data - *offset);
        else
-               *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+               *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
 }
 
 /**
@@ -882,6 +834,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        struct i40e_ring *ring = vsi->tx_rings[i];
+
                        clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
                }
        }
@@ -905,15 +858,15 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
 
        dcb_cfg = &hw->local_dcbx_config;
 
-       /* See if DCB enabled with PFC TC */
-       if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
-           !(dcb_cfg->pfc.pfcenable)) {
+       /* Collect Link XOFF stats when PFC is disabled */
+       if (!dcb_cfg->pfc.pfcenable) {
                i40e_update_link_xoff_rx(pf);
                return;
        }
 
        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
                u64 prio_xoff = nsd->priority_xoff_rx[i];
+
                i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_rx[i],
@@ -974,6 +927,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 #ifdef HAVE_NDO_GET_STATS64
        unsigned int start;
 #endif
+       u64 tx_linearize;
        u64 rx_p, rx_b;
        u64 tx_p, tx_b;
        u16 q;
@@ -992,7 +946,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
         */
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
-       tx_restart = tx_busy = 0;
+       tx_restart = tx_busy = tx_linearize = 0;
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
@@ -1013,6 +967,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                tx_p += packets;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;
+               tx_linearize += p->tx_stats.tx_linearize;
 
                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
@@ -1033,6 +988,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        rcu_read_unlock();
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
+       vsi->tx_linearize = tx_linearize;
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;
 
@@ -1245,12 +1201,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                           &osd->rx_jabber, &nsd->rx_jabber);
 
        /* FDIR stats */
-       i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+       i40e_stat_update32(hw,
+                          I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
                           pf->stat_offsets_loaded,
                           &osd->fd_atr_match, &nsd->fd_atr_match);
-       i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+       i40e_stat_update32(hw,
+                          I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
                           pf->stat_offsets_loaded,
                           &osd->fd_sb_match, &nsd->fd_sb_match);
+       i40e_stat_update32(hw,
+                     I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+                     pf->stat_offsets_loaded,
+                     &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
 
        val = rd32(hw, I40E_PRTPM_EEE_STAT);
        nsd->tx_lpi_status =
@@ -1266,6 +1228,20 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                           pf->stat_offsets_loaded,
                           &osd->rx_lpi_count, &nsd->rx_lpi_count);
 
+       if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+               nsd->fd_sb_status = false;
+       else if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
+               nsd->fd_sb_status = true;
+       else
+               nsd->fd_sb_status = false;
+
+       if (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)
+               nsd->fd_atr_status = false;
+       else if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+               nsd->fd_atr_status = true;
+       else
+               nsd->fd_atr_status = false;
+
        pf->stat_offsets_loaded = true;
 }
 
@@ -1304,6 +1280,9 @@ struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
 {
        struct i40e_mac_filter *f;
 
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
        if (!vsi || !macaddr)
                return NULL;
 
@@ -1332,6 +1311,9 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
 {
        struct i40e_mac_filter *f;
 
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
        if (!vsi || !macaddr)
                return NULL;
 
@@ -1354,11 +1336,14 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
 {
        struct i40e_mac_filter *f;
 
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
        /* Only -1 for all the filters denotes not in vlan mode
         * so we have to go through all the list in order to make sure
         */
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
-               if (f->vlan >= 0)
+               if (f->vlan >= 0 || vsi->info.pvid)
                        return true;
        }
 
@@ -1382,7 +1367,12 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
 {
        struct i40e_mac_filter *f;
 
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               if (vsi->info.pvid)
+                       f->vlan = le16_to_cpu(vsi->info.pvid);
                if (!i40e_find_filter(vsi, macaddr, f->vlan,
                                      is_vf, is_netdev)) {
                        if (!i40e_add_filter(vsi, macaddr, f->vlan,
@@ -1407,7 +1397,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
 {
        struct i40e_aqc_remove_macvlan_element_data element;
        struct i40e_pf *pf = vsi->back;
-       i40e_status aq_ret;
+       i40e_status ret;
 
        /* Only appropriate for the PF main VSI */
        if (vsi->type != I40E_VSI_MAIN)
@@ -1418,8 +1408,8 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
        element.vlan_tag = 0;
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
                        I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
-       aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
-       if (aq_ret)
+       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+       if (ret)
                return -ENOENT;
 
        return 0;
@@ -1434,6 +1424,10 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
  * @is_netdev: make sure its a netdev filter, else doesn't matter
  *
  * Returns ptr to the filter object or NULL when no memory available.
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held. If needed could add WARN/BUG_ON if lock is not held for debug
+ * purpose.
  **/
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        u8 *macaddr, s16 vlan,
@@ -1441,6 +1435,9 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
 {
        struct i40e_mac_filter *f;
 
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
        if (!vsi || !macaddr)
                return NULL;
 
@@ -1492,6 +1489,10 @@ add_filter_out:
  * @vlan: the vlan
  * @is_vf: make sure it's a VF filter, else doesn't matter
  * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held. If needed could add WARN/BUG_ON if lock is not held for debug
+ * purpose.
  **/
 void i40e_del_filter(struct i40e_vsi *vsi,
                     u8 *macaddr, s16 vlan,
@@ -1499,6 +1500,9 @@ void i40e_del_filter(struct i40e_vsi *vsi,
 {
        struct i40e_mac_filter *f;
 
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
        if (!vsi || !macaddr)
                return;
 
@@ -1519,6 +1523,7 @@ void i40e_del_filter(struct i40e_vsi *vsi,
        } else {
                /* make sure we don't remove a filter in use by VF or netdev */
                int min_f = 0;
+
                min_f += (f->is_vf ? 1 : 0);
                min_f += (f->is_netdev ? 1 : 0);
 
@@ -1577,6 +1582,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 
        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;
+
                ret = i40e_aq_mac_address_write(&vsi->back->hw,
                                                I40E_AQC_WRITE_TYPE_LAA_WOL,
                                                addr->sa_data, NULL);
@@ -1596,8 +1602,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
                element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
                i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
        } else {
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
                                false, false);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
        }
 
        if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
@@ -1608,16 +1616,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
                element.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
                i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
        } else {
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
                                    false, false);
                if (f)
                        f->is_laa = true;
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
        }
 
-       i40e_sync_vsi_filters(vsi);
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
-       return 0;
+       return i40e_sync_vsi_filters(vsi, false);
 }
 
 /**
@@ -1657,7 +1666,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                       if (enabled_tc & (1 << i)) /* TC is enabled */
+                       if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
                                numtc++;
                }
                if (!numtc) {
@@ -1680,13 +1689,14 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
        else
                qcount = vsi->alloc_queue_pairs;
-       num_tc_qps = qcount/numtc;
-       num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(&pf->hw));
+
+       num_tc_qps = qcount / numtc;
+       num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
 
        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
-               if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+               if (vsi->tc_config.enabled_tc & BIT_ULL(i)) { /* TC is enabled */
                        int pow, num_qps;
 
                        switch (vsi->type) {
@@ -1712,7 +1722,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                        /* find the next higher power-of-2 of num queue pairs */
                        num_qps = qcount;
                        pow = 0;
-                       while (num_qps && ((1 << pow) < qcount)) {
+                       while (num_qps && (BIT_ULL(pow) < qcount)) {
                                pow++;
                                num_qps >>= 1;
                        }
@@ -1791,6 +1801,8 @@ static void i40e_set_rx_mode(struct net_device *netdev)
 #endif /* NETDEV_HW_ADDR_T_MULTICAST */
        struct netdev_hw_addr *ha;
 
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
        /* add addr if not already in the filter list */
        netdev_for_each_uc_addr(uca, netdev) {
                if (!i40e_find_mac(vsi, uca->addr, false, true)) {
@@ -1827,42 +1839,33 @@ static void i40e_set_rx_mode(struct net_device *netdev)
 
        /* remove filter if not in netdev list */
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
-               bool found = false;
 
                if (!f->is_netdev)
                        continue;
 
-               if (is_multicast_ether_addr(f->macaddr)) {
-                       netdev_for_each_mc_addr(mca, netdev) {
+               netdev_for_each_mc_addr(mca, netdev)
 #ifdef NETDEV_HW_ADDR_T_MULTICAST
-                               if (ether_addr_equal(mca->addr, f->macaddr)) {
+                       if (ether_addr_equal(mca->addr, f->macaddr))
 #else
-                               if (ether_addr_equal(mca->dmi_addr,
-                                                    f->macaddr)) {
+                       if (ether_addr_equal(mca->dmi_addr, f->macaddr))
 #endif
-                                       found = true;
-                                       break;
-                               }
-                       }
-               } else {
-                       netdev_for_each_uc_addr(uca, netdev) {
-                               if (ether_addr_equal(uca->addr, f->macaddr)) {
-                                       found = true;
-                                       break;
-                               }
-                       }
+                               goto bottom_of_search_loop;
 
-                       for_each_dev_addr(netdev, ha) {
-                               if (ether_addr_equal(ha->addr, f->macaddr)) {
-                                       found = true;
-                                       break;
-                               }
-                       }
-               }
-               if (!found)
-                       i40e_del_filter(
-                          vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+               netdev_for_each_uc_addr(uca, netdev)
+                       if (ether_addr_equal(uca->addr, f->macaddr))
+                               goto bottom_of_search_loop;
+
+               for_each_dev_addr(netdev, ha)
+                       if (ether_addr_equal(ha->addr, f->macaddr))
+                               goto bottom_of_search_loop;
+
+               /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+               i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+
+bottom_of_search_loop:
+               continue;
        }
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* check for other flag changes */
        if (vsi->current_netdev_flags != vsi->netdev->flags) {
@@ -1871,25 +1874,102 @@ static void i40e_set_rx_mode(struct net_device *netdev)
        }
 }
 
+/**
+ * i40e_mac_filter_entry_clone - Clones a MAC filter entry
+ * @src: source MAC filter entry to be cloned
+ *
+ * Returns the pointer to newly cloned MAC filter entry or NULL
+ * in case of error
+ **/
+static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
+                                       struct i40e_mac_filter *src)
+{
+       struct i40e_mac_filter *f;
+
+       f = kzalloc(sizeof(*f), GFP_ATOMIC);
+       if (!f)
+               return NULL;
+       *f = *src;
+
+       INIT_LIST_HEAD(&f->list);
+
+       return f;
+}
+
+/**
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ *        those entries need to be undone.
+ *
+ * MAC filter entries from list were slated to be removed from device.
+ **/
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+                               struct list_head *from)
+{
+       struct i40e_mac_filter *f, *ftmp;
+
+       list_for_each_entry_safe(f, ftmp, from, list) {
+               f->changed = true;
+               /* Move the element back into MAC filter list */
+               list_move_tail(&f->list, &vsi->mac_filter_list);
+       }
+}
+
+/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: Pointer to VSI whose MAC filter list entries - changes to
+ *        those entries need to be undone.
+ *
+ * MAC filter entries from the list were slated to be added to the device.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
+{
+       struct i40e_mac_filter *f, *ftmp;
+
+       list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+               if (f->changed == false && f->counter != 0)
+                       f->changed = true;
+       }
+}
+
+/**
+ * i40e_cleanup_add_list - Deletes the elements from add list and releases
+ *                     memory
+ * @add_list: Pointer to list which contains MAC filter entries
+ **/
+static void i40e_cleanup_add_list(struct list_head *add_list)
+{
+       struct i40e_mac_filter *f, *ftmp;
+
+       list_for_each_entry_safe(f, ftmp, add_list, list) {
+               list_del(&f->list);
+               kfree(f);
+       }
+}
+
 /**
  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
+ * @grab_rtnl: whether RTNL needs to be grabbed
  *
  * Push any outstanding VSI filter changes through the AdminQ.
  *
  * Returns 0 or error value
  **/
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 {
-       struct i40e_mac_filter *f, *ftmp;
+       struct list_head tmp_del_list, tmp_add_list;
+       struct i40e_mac_filter *f, *ftmp, *fclone;
        bool promisc_forced_on = false;
        bool add_happened = false;
        int filter_list_len = 0;
        u32 changed_flags = 0;
-       i40e_status aq_ret = 0;
+       bool err_cond = false;
+       i40e_status ret = 0;
        struct i40e_pf *pf;
        int num_add = 0;
        int num_del = 0;
+       int aq_err = 0;
        u16 cmd_flags;
 
        /* empty array typed pointers, kcalloc later */
@@ -1905,17 +1985,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                vsi->current_netdev_flags = vsi->netdev->flags;
        }
 
+       INIT_LIST_HEAD(&tmp_del_list);
+       INIT_LIST_HEAD(&tmp_add_list);
+
        if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
                vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
 
-               filter_list_len = pf->hw.aq.asq_buf_size /
-                           sizeof(struct i40e_aqc_remove_macvlan_element_data);
-               del_list = kcalloc(filter_list_len,
-                           sizeof(struct i40e_aqc_remove_macvlan_element_data),
-                           GFP_KERNEL);
-               if (!del_list)
-                       return -ENOMEM;
-
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                        if (!f->changed)
                                continue;
@@ -1923,6 +1999,60 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                        if (f->counter != 0)
                                continue;
                        f->changed = false;
+
+                       /* Move the element into temporary del_list */
+                       list_move_tail(&f->list, &tmp_del_list);
+               }
+
+               list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+                       if (!f->changed)
+                               continue;
+
+                       if (f->counter == 0)
+                               continue;
+                       f->changed = false;
+
+                       /* Clone MAC filter entry and add into temporary list */
+                       fclone = i40e_mac_filter_entry_clone(f);
+                       if (!fclone) {
+                               err_cond = true;
+                               break;
+                       }
+                       list_add_tail(&fclone->list, &tmp_add_list);
+               }
+
+               /* if failed to clone MAC filter entry - undo */
+               if (err_cond) {
+                       i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+                       i40e_undo_add_filter_entries(vsi);
+               }
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+               if (err_cond)
+                       i40e_cleanup_add_list(&tmp_add_list);
+       }
+
+       /* Now process 'del_list' outside the lock */
+       if (!list_empty(&tmp_del_list)) {
+
+               filter_list_len = pf->hw.aq.asq_buf_size /
+                           sizeof(struct i40e_aqc_remove_macvlan_element_data);
+               del_list = kcalloc(filter_list_len,
+                           sizeof(struct i40e_aqc_remove_macvlan_element_data),
+                           GFP_KERNEL);
+               if (!del_list) {
+                       i40e_cleanup_add_list(&tmp_add_list);
+
+                       /* Undo VSI's MAC filter entry element updates */
+                       spin_lock_bh(&vsi->mac_filter_list_lock);
+                       i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+                       i40e_undo_add_filter_entries(vsi);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+                       return -ENOMEM;
+               }
+
+               list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
                        cmd_flags = 0;
 
                        /* add to delete list */
@@ -1935,41 +2065,46 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                        del_list[num_del].flags = cmd_flags;
                        num_del++;
 
-                       /* unlink from filter list */
-                       list_del(&f->list);
-                       kfree(f);
-
                        /* flush a full buffer */
                        if (num_del == filter_list_len) {
-                               aq_ret = i40e_aq_remove_macvlan(&pf->hw,
+                               ret = i40e_aq_remove_macvlan(&pf->hw,
                                            vsi->seid, del_list, num_del,
                                            NULL);
+                               aq_err = pf->hw.aq.asq_last_status;
                                num_del = 0;
                                memset(del_list, 0, sizeof(*del_list));
 
-                               if (aq_ret &&
-                                   pf->hw.aq.asq_last_status !=
-                                                             I40E_AQ_RC_ENOENT)
-                                       dev_info(&pf->pdev->dev,
-                                                "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
-                                                aq_ret,
-                                                pf->hw.aq.asq_last_status);
+                               if (ret && aq_err != I40E_AQ_RC_ENOENT)
+                                       dev_err(&pf->pdev->dev,
+                                                "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+                                                i40e_stat_str(&pf->hw, ret),
+                                                i40e_aq_str(&pf->hw, aq_err));
                        }
+                       /* Release memory for MAC filter entries which were
+                        * synced up with HW.
+                        */
+                       list_del(&f->list);
+                       kfree(f);
                }
+
                if (num_del) {
-                       aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+                       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
                                                     del_list, num_del, NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
                        num_del = 0;
 
-                       if (aq_ret &&
-                           pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
+                       if (ret && aq_err != I40E_AQ_RC_ENOENT)
                                dev_info(&pf->pdev->dev,
-                                        "ignoring delete macvlan error, err %d, aq_err %d\n",
-                                        aq_ret, pf->hw.aq.asq_last_status);
+                                        "ignoring delete macvlan error, err %s aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw, aq_err));
                }
 
                kfree(del_list);
                del_list = NULL;
+       }
+
+       if (!list_empty(&tmp_add_list)) {
 
                /* do all the adds now */
                filter_list_len = pf->hw.aq.asq_buf_size /
@@ -1977,16 +2112,20 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                add_list = kcalloc(filter_list_len,
                               sizeof(struct i40e_aqc_add_macvlan_element_data),
                               GFP_KERNEL);
-               if (!add_list)
+               if (!add_list) {
+                       /* Purge element from temporary lists */
+                       i40e_cleanup_add_list(&tmp_add_list);
+
+                       /* Undo add filter entries from VSI MAC filter list */
+                       spin_lock_bh(&vsi->mac_filter_list_lock);
+                       i40e_undo_add_filter_entries(vsi);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                        return -ENOMEM;
+               }
 
-               list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
-                       if (!f->changed)
-                               continue;
+               list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
 
-                       if (f->counter == 0)
-                               continue;
-                       f->changed = false;
                        add_happened = true;
                        cmd_flags = 0;
 
@@ -2003,29 +2142,37 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
                        /* flush a full buffer */
                        if (num_add == filter_list_len) {
-                               aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-                                                            add_list, num_add,
-                                                            NULL);
+                               ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                         add_list, num_add,
+                                                         NULL);
+                               aq_err = pf->hw.aq.asq_last_status;
                                num_add = 0;
 
-                               if (aq_ret)
+                               if (ret)
                                        break;
                                memset(add_list, 0, sizeof(*add_list));
                        }
+                       /* Entries from tmp_add_list were cloned from MAC
+                        * filter list, hence clean those cloned entries
+                        */
+                       list_del(&f->list);
+                       kfree(f);
                }
+
                if (num_add) {
-                       aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-                                                    add_list, num_add, NULL);
+                       ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                 add_list, num_add, NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
                        num_add = 0;
                }
                kfree(add_list);
                add_list = NULL;
 
-               if (add_happened && aq_ret &&
-                   pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
+               if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
                        dev_info(&pf->pdev->dev,
-                                "add filter failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
+                                "add filter failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw, aq_err));
                        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
                            !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                      &vsi->state)) {
@@ -2040,35 +2187,67 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
        /* check for changes in promiscuous modes */
        if (changed_flags & IFF_ALLMULTI) {
                bool cur_multipromisc;
+
                cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
-               aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+               ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
                                                               vsi->seid,
                                                               cur_multipromisc,
                                                               NULL);
-               if (aq_ret)
+               if (ret)
                        dev_info(&pf->pdev->dev,
-                                "set multi promisc failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
+                                "set multi promisc failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
        }
        if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
                bool cur_promisc;
+
                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
                               test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                        &vsi->state));
-               aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+               if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
+                       /*  set defport ON for Main VSI instead of true promisc
+                        *  this way we will get all unicast/multicast and vlan
+                        *  promisc behavior but will not get VF or VMDq traffic
+                        *  replicated on the Main VSI.
+                        */
+                       if (pf->cur_promisc != cur_promisc) {
+                               pf->cur_promisc = cur_promisc;
+                               set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+                       }
+               } else {
+                       ret = i40e_aq_set_vsi_unicast_promiscuous(
+                                                            &vsi->back->hw,
                                                             vsi->seid,
                                                             cur_promisc, NULL);
-               if (aq_ret)
-                       dev_info(&pf->pdev->dev,
-                                "set uni promisc failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
-               aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+                       if (ret)
+                               dev_info(&pf->pdev->dev,
+                                        "set unicast promisc failed, err %s, aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                        pf->hw.aq.asq_last_status));
+                       ret = i40e_aq_set_vsi_multicast_promiscuous(
+                                                                &vsi->back->hw,
+                                                                vsi->seid,
+                                                                cur_promisc,
+                                                                NULL);
+                       if (ret)
+                               dev_info(&pf->pdev->dev,
+                                        "set multicast promisc failed, err %s, aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                        pf->hw.aq.asq_last_status));
+               }
+               ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
                                                   vsi->seid,
                                                   cur_promisc, NULL);
-               if (aq_ret)
+               if (ret)
                        dev_info(&pf->pdev->dev,
-                                "set brdcast promisc failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
+                                "set brdcast promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
        }
 
        clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -2085,12 +2264,19 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
 
        if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
                return;
+
        pf->flags &= ~I40E_FLAG_FILTER_SYNC;
 
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v] &&
-                   (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
-                       i40e_sync_vsi_filters(pf->vsi[v]);
+                   (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
+                       int ret = i40e_sync_vsi_filters(pf->vsi[v], true);
+                       if (ret) {
+                               /* come back and try again later */
+                               pf->flags |= I40E_FLAG_FILTER_SYNC;
+                               break;
+                       }
+               }
        }
 }
 
@@ -2167,12 +2353,14 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
                                    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
 
        ctxt.seid = vsi->seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "%s: update vsi failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vlan stripping failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                     vsi->back->hw.aq.asq_last_status));
        }
 }
 
@@ -2196,12 +2384,14 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
                                    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
 
        ctxt.seid = vsi->seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "%s: update vsi failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vlan stripping failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                     vsi->back->hw.aq.asq_last_status));
        }
 }
 
@@ -2254,9 +2444,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
 {
        struct i40e_mac_filter *f, *add_f;
        bool is_netdev, is_vf;
+
        is_vf = (vsi->type == I40E_VSI_SRIOV);
        is_netdev = !!(vsi->netdev);
 
+       /* Locked once because all functions invoked below iterate the list */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
        if (is_netdev) {
                add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
                                        is_vf, is_netdev);
@@ -2264,6 +2458,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add vlan filter %d for %pM\n",
                                 vid, vsi->netdev->dev_addr);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
                        return -ENOMEM;
                }
        }
@@ -2274,6 +2469,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add vlan filter %d for %pM\n",
                                 vid, f->macaddr);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
                        return -ENOMEM;
                }
        }
@@ -2295,6 +2491,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
                                dev_info(&vsi->back->pdev->dev,
                                         "Could not add filter 0 for %pM\n",
                                         vsi->netdev->dev_addr);
+                               spin_unlock_bh(&vsi->mac_filter_list_lock);
                                return -ENOMEM;
                        }
                }
@@ -2303,27 +2500,33 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
        /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
        if (vid > 0 && !vsi->info.pvid) {
                list_for_each_entry(f, &vsi->mac_filter_list, list) {
-                       if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
-                                            is_vf, is_netdev)) {
-                               i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
-                                               is_vf, is_netdev);
-                               add_f = i40e_add_filter(vsi, f->macaddr,
-                                                       0, is_vf, is_netdev);
-                               if (!add_f) {
-                                       dev_info(&vsi->back->pdev->dev,
-                                                "Could not add filter 0 for %pM\n",
-                                                f->macaddr);
-                                       return -ENOMEM;
-                               }
+                       if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+                                            is_vf, is_netdev))
+                               continue;
+                       i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+                                       is_vf, is_netdev);
+                       add_f = i40e_add_filter(vsi, f->macaddr,
+                                               0, is_vf, is_netdev);
+                       if (!add_f) {
+                               dev_info(&vsi->back->pdev->dev,
+                                       "Could not add filter 0 for %pM\n",
+                                       f->macaddr);
+                               spin_unlock_bh(&vsi->mac_filter_list_lock);
+                               return -ENOMEM;
                        }
                }
        }
 
+       /* Make sure to release before sync_vsi_filter because that
+        * function will lock/unlock as necessary
+        */
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return 0;
 
-       return i40e_sync_vsi_filters(vsi);
+       return i40e_sync_vsi_filters(vsi, false);
 }
 
 /**
@@ -2339,9 +2542,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
        struct i40e_mac_filter *f, *add_f;
        bool is_vf, is_netdev;
        int filter_count = 0;
+
        is_vf = (vsi->type == I40E_VSI_SRIOV);
        is_netdev = !!(netdev);
 
+       /* Locked once because all functions invoked below iterate the list */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
        if (is_netdev)
                i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
 
@@ -2372,6 +2579,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add filter %d for %pM\n",
                                 I40E_VLAN_ANY, netdev->dev_addr);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
                        return -ENOMEM;
                }
        }
@@ -2380,21 +2588,27 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
                list_for_each_entry(f, &vsi->mac_filter_list, list) {
                        i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
                        add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
-                                           is_vf, is_netdev);
+                                               is_vf, is_netdev);
                        if (!add_f) {
                                dev_info(&vsi->back->pdev->dev,
                                         "Could not add filter %d for %pM\n",
                                         I40E_VLAN_ANY, f->macaddr);
+                               spin_unlock_bh(&vsi->mac_filter_list_lock);
                                return -ENOMEM;
                        }
                }
        }
 
+       /* Make sure to release before sync_vsi_filter because that
+        * function will lock/unlock as necessary
+        */
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return 0;
 
-       return i40e_sync_vsi_filters(vsi);
+       return i40e_sync_vsi_filters(vsi, false);
 }
 
 /**
@@ -2577,7 +2791,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 {
        struct i40e_vsi_context ctxt;
-       i40e_status aq_ret;
+       i40e_status ret;
 
        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi->info.pvid = cpu_to_le16(vid);
@@ -2586,12 +2800,14 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
                                    I40E_AQ_VSI_PVLAN_EMOD_STR;
 
        ctxt.seid = vsi->seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ctxt.info = vsi->info;
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "%s: update vsi failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "add pvid failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                     vsi->back->hw.aq.asq_last_status));
                return -ENOENT;
        }
 
@@ -2610,6 +2826,90 @@ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
 
        vsi->info.pvid = 0;
 }
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+
+/**
+ * i40e_add_del_cloud_filter - Add/del cloud filter
+ * @pf: pointer to the physical function struct
+ * @filter: cloud filter rule
+ * @vsi: pointer to the destination vsi
+ * @add: if true, add, if false, delete
+ *
+ * Add or delete a cloud filter for a specific flow spec.
+ * Returns 0 if the filter was successfully added.
+ **/
+int i40e_add_del_cloud_filter(struct i40e_pf *pf,
+                             struct i40e_cloud_filter *filter,
+                             struct i40e_vsi *vsi, bool add)
+{
+       struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+       u32 ipaddr;
+       int ret;
+       static const u16 flag_table[128] = {
+               [I40E_CLOUD_FILTER_FLAGS_OMAC]  =
+                       I40E_AQC_ADD_CLOUD_FILTER_OMAC,
+               [I40E_CLOUD_FILTER_FLAGS_IMAC]  =
+                       I40E_AQC_ADD_CLOUD_FILTER_IMAC,
+               [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
+                       I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
+               [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
+                       I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
+               [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
+                       I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
+               [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
+                       I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
+               [I40E_CLOUD_FILTER_FLAGS_IIP] =
+                       I40E_AQC_ADD_CLOUD_FILTER_IIP,
+       };
+
+       if (vsi == NULL)
+               return I40E_ERR_BAD_PTR;
+
+       if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_XVLAN)
+               return I40E_ERR_NOT_IMPLEMENTED;
+
+       if ((filter->flags >= ARRAY_SIZE(flag_table)) ||
+           (flag_table[filter->flags] == 0))
+               return I40E_ERR_CONFIG;
+
+       memset(&cld_filter, 0, sizeof(cld_filter));
+       ether_addr_copy(cld_filter.outer_mac, filter->outer_mac);
+       ether_addr_copy(cld_filter.inner_mac, filter->inner_mac);
+
+       /* the low index of the data storing the IP address indicates the
+        * last byte on the wire.
+        */
+       ipaddr = ntohl(filter->inner_ip[0]);
+       memcpy(&cld_filter.ipaddr.v4.data, &ipaddr, 4);
+       cld_filter.inner_vlan = cpu_to_le16(ntohs(filter->inner_vlan));
+       cld_filter.tenant_id = cpu_to_le32(filter->tenant_id);
+       cld_filter.queue_number = cpu_to_le16(filter->queue_id);
+
+       /* Only supports VXLAN tunnel for now */
+       cld_filter.flags = cpu_to_le16(
+                               I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN <<
+                               I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
+
+       if (filter->flags != I40E_CLOUD_FILTER_FLAGS_OMAC)
+               cld_filter.flags |=
+                       cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE);
+
+       cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
+                       I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
+
+       if (add)
+               ret = i40e_aq_add_cloud_filters(&pf->hw, vsi->seid,
+                                               &cld_filter, 1);
+       else
+               ret = i40e_aq_remove_cloud_filters(&pf->hw, vsi->seid,
+                                               &cld_filter, 1);
+       if (ret)
+               dev_err(&pf->pdev->dev,
+                       "fail to %s cloud filter, err %d aq_err %d\n",
+                       add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
+       return ret;
+}
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
 
 /**
  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
@@ -2802,8 +3102,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
                qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
                qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
                           I40E_QTX_CTL_VFVM_INDX_MASK;
-       }
-       else {
+       } else {
                qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
        }
 
@@ -2872,7 +3171,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = 1;
        rx_ctx.l2tsel = 1;
-       rx_ctx.showiv = 1;
+       /* this controls whether VLAN is stripped from inner headers */
+       rx_ctx.showiv = 0;
 #ifdef I40E_FCOE
        rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
 #endif
@@ -2944,6 +3244,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
                               + ETH_FCS_LEN + VLAN_HLEN;
        else
                vsi->max_frame = I40E_RXBUFFER_2048;
+
        /* figure out correct receive buffer length */
        switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
                                    I40E_FLAG_RX_PS_ENABLED)) {
@@ -2977,9 +3278,9 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 #endif /* I40E_FCOE */
        /* round up for the chip's needs */
        vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
-                               (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+                               BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
        vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
-                               (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+                               BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
        /* set up individual rings */
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -3009,7 +3310,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
        }
 
        for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
-               if (!(vsi->tc_config.enabled_tc & (1 << n)))
+               if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
                        continue;
 
                qoffset = vsi->tc_config.tc_info[n].qoffset;
@@ -3080,11 +3381,9 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi)
 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
-       struct i40e_q_vector *q_vector;
        struct i40e_hw *hw = &pf->hw;
        u16 vector;
        int i, q;
-       u32 val;
        u32 qp;
 
        /* The interrupt indexing is offset by 1 in the PFINT_ITRn
@@ -3094,7 +3393,9 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
        qp = vsi->base_queue;
        vector = vsi->base_vector;
        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
-               q_vector = vsi->q_vectors[i];
+               struct i40e_q_vector *q_vector = vsi->q_vectors[i];
+
+               q_vector->itr_countdown = ITR_COUNTDOWN_START;
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
                q_vector->rx.latency_range = I40E_LOW_LATENCY;
                wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -3103,10 +3404,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
                q_vector->tx.latency_range = I40E_LOW_LATENCY;
                wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
                     q_vector->tx.itr);
+               wr32(hw, I40E_PFINT_RATEN(vector - 1),
+                    INTRL_USEC_TO_REG(vsi->int_rate_limit));
 
                /* Linked list for the queuepairs assigned to this vector */
                wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
                for (q = 0; q < q_vector->num_ringpairs; q++) {
+                       u32 val;
+
                        val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
                              (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
                              (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
@@ -3185,6 +3490,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
        u32 val;
 
        /* set the ITR configuration */
+       q_vector->itr_countdown = ITR_COUNTDOWN_START;
        q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
        q_vector->rx.latency_range = I40E_LOW_LATENCY;
        wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
@@ -3242,24 +3548,6 @@ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
        i40e_flush(hw);
 }
 
-/**
- * i40e_irq_dynamic_enable - Enable default interrupt generation settings
- * @vsi: pointer to a vsi
- * @vector: enable a particular Hw Interrupt vector
- **/
-void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
-{
-       struct i40e_pf *pf = vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       u32 val;
-
-       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-             (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
-       wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-       /* skip the flush */
-}
-
 /**
  * i40e_irq_dynamic_disable - Disable default interrupt generation settings
  * @vsi: pointer to a vsi
@@ -3333,8 +3621,7 @@ int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
                                  q_vector);
                if (err) {
                        dev_info(&pf->pdev->dev,
-                                "%s: request_irq failed, error: %d\n",
-                                __func__, err);
+                                "MSIX request_irq failed, error: %d\n", err);
                        goto free_queue_irqs;
                }
 #ifdef HAVE_IRQ_AFFINITY_HINT
@@ -3403,8 +3690,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
        int i;
 
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-               for (i = vsi->base_vector;
-                    i < (vsi->num_q_vectors + vsi->base_vector); i++)
+               for (i = 0; i < vsi->num_q_vectors; i++)
                        i40e_irq_dynamic_enable(vsi, i);
        } else {
                i40e_irq_dynamic_enable_icr0(pf);
@@ -3459,6 +3745,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
                /* temporarily disable queue cause for NAPI processing */
                u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+
                qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
                wr32(hw, I40E_QINT_RQCTL(0), qval);
 
@@ -3505,6 +3792,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
        if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
                icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
                dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+               dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
+                        rd32(hw, I40E_PFHMC_ERRORINFO),
+                        rd32(hw, I40E_PFHMC_ERRORDATA));
        }
 
 #ifdef HAVE_PTP_1588_CLOCK
@@ -3627,10 +3917,9 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
        i += tx_ring->count;
        tx_ring->next_to_clean = i;
 
-       if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-               i40e_irq_dynamic_enable(vsi,
-                               tx_ring->q_vector->v_idx + vsi->base_vector);
-       }
+       if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
+               i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
+
        return budget > 0;
 }
 
@@ -3856,9 +4145,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
                ret = i40e_pf_txq_wait(pf, pf_q, enable);
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "%s: VSI seid %d Tx ring %d %sable timeout\n",
-                                __func__, vsi->seid, pf_q,
-                                (enable ? "en" : "dis"));
+                                "VSI seid %d Tx ring %d %sable timeout\n",
+                                vsi->seid, pf_q, (enable ? "en" : "dis"));
                        break;
                }
        }
@@ -3932,9 +4220,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
                ret = i40e_pf_rxq_wait(pf, pf_q, enable);
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "%s: VSI seid %d Rx ring %d %sable timeout\n",
-                                __func__, vsi->seid, pf_q,
-                                (enable ? "en" : "dis"));
+                                "VSI seid %d Rx ring %d %sable timeout\n",
+                                vsi->seid, pf_q, (enable ? "en" : "dis"));
                        break;
                }
        }
@@ -4226,6 +4513,7 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
        i40e_vsi_free_irq(vsi);
        i40e_vsi_free_tx_resources(vsi);
        i40e_vsi_free_rx_resources(vsi);
+       vsi->current_netdev_flags = 0;
 }
 
 /**
@@ -4241,21 +4529,19 @@ void i40e_quiesce_vsi(struct i40e_vsi *vsi)
        if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
             vsi->type == I40E_VSI_FCOE) {
                dev_dbg(&vsi->back->pdev->dev,
-                        "%s: VSI seid %d skipping FCoE VSI disable\n",
-                        __func__, vsi->seid);
+                        "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
                return;
        }
 
        set_bit(__I40E_NEEDS_RESTART, &vsi->state);
-       if (vsi->netdev && netif_running(vsi->netdev)) {
+       if (vsi->netdev && netif_running(vsi->netdev))
 #ifdef HAVE_NET_DEVICE_OPS
                vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
 #else /* HAVE_NET_DEVICE_OPS */
                vsi->netdev->stop(vsi->netdev);
 #endif /* HAVE_NET_DEVICE_OPS */
-       } else {
+       else
                i40e_vsi_close(vsi);
-       }
 }
 
 /**
@@ -4324,8 +4610,8 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
                ret = i40e_pf_txq_wait(pf, pf_q, false);
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "%s: VSI seid %d Tx ring %d disable timeout\n",
-                                __func__, vsi->seid, pf_q);
+                                "VSI seid %d Tx ring %d disable timeout\n",
+                                vsi->seid, pf_q);
                        return ret;
                }
        }
@@ -4378,7 +4664,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
                if (app.selector == I40E_APP_SEL_TCPIP &&
                    app.protocolid == I40E_APP_PROTOID_ISCSI) {
                        tc = dcbcfg->etscfg.prioritytable[app.priority];
-                       enabled_tc |= (1 << tc);
+                       enabled_tc |= BIT_ULL(tc);
                        break;
                }
        }
@@ -4427,7 +4713,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
        u8 i;
 
        for (i = 0; i < num_tc; i++)
-               enabled_tc |= 1 << i;
+               enabled_tc |= BIT(i);
 
        return enabled_tc;
 }
@@ -4462,7 +4748,7 @@ u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
        /* At least have TC0 */
        enabled_tc = (enabled_tc ? enabled_tc : 0x1);
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        num_tc++;
        }
        return num_tc;
@@ -4484,11 +4770,11 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
 
        /* Find the first enabled TC */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        break;
        }
 
-       return 1 << i;
+       return BIT(i);
 }
 
 /**
@@ -4526,26 +4812,28 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
        struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
-       i40e_status aq_ret;
+       i40e_status ret;
        u32 tc_bw_max;
        int i;
 
        /* Get the VSI level BW configuration */
-       aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get PF vsi bw config, err %d, aq_err %d\n",
-                        aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EINVAL;
        }
 
        /* Get the VSI level BW configuration per TC */
-       aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+       ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
                                                  NULL);
-       if (aq_ret) {
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
-                        aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EINVAL;
        }
 
@@ -4584,16 +4872,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                                       u8 *bw_share)
 {
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
-       i40e_status aq_ret;
+       i40e_status ret;
        int i;
 
        bw_data.tc_valid_bits = enabled_tc;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                bw_data.tc_bw_credits[i] = bw_share[i];
 
-       aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+       ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
                                          NULL);
-       if (aq_ret) {
+       if (ret) {
                dev_info(&vsi->back->pdev->dev,
                         "AQ command Config VSI BW allocation per TC failed = %d\n",
                         vsi->back->hw.aq.asq_last_status);
@@ -4642,7 +4930,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
                 * will set the numtc for netdev as 2 that will be
                 * referenced by the netdev layer as TC 0 and 1.
                 */
-               if (vsi->tc_config.enabled_tc & (1 << i))
+               if (vsi->tc_config.enabled_tc & BIT_ULL(i))
                        netdev_set_tc_queue(netdev,
                                        vsi->tc_config.tc_info[i].netdev_tc,
                                        vsi->tc_config.tc_info[i].qcount,
@@ -4704,7 +4992,7 @@ int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 
        /* Enable ETS TCs with equal BW Share for now across all VSIs */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        bw_share[i] = 1;
        }
 
@@ -4721,15 +5009,17 @@ int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
        ctxt.pf_num = vsi->back->hw.pf_id;
        ctxt.vf_num = 0;
        ctxt.uplink_seid = vsi->uplink_seid;
-       memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+       ctxt.info = vsi->info;
        i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
 
        /* Update the VSI after updating the VSI queue-mapping information */
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "update vsi failed, aq_err=%d\n",
-                        vsi->back->hw.aq.asq_last_status);
+                        "Update vsi tc config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                     vsi->back->hw.aq.asq_last_status));
                goto out;
        }
        /* update the local VSI info with updated queue map */
@@ -4740,8 +5030,10 @@ int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
        ret = i40e_vsi_get_bw_info(vsi);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "Failed updating vsi bw info, aq_err=%d\n",
-                        vsi->back->hw.aq.asq_last_status);
+                        "Failed updating vsi bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                     vsi->back->hw.aq.asq_last_status));
                goto out;
        }
 
@@ -4774,7 +5066,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
 
        /* Enable ETS TCs with equal BW Share for now */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        bw_data.tc_bw_share_credits[i] = 1;
        }
 
@@ -4782,8 +5074,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
                                                   &bw_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "veb bw config failed, aq_err=%d\n",
-                        pf->hw.aq.asq_last_status);
+                        "VEB bw config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto out;
        }
 
@@ -4791,8 +5084,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
        ret = i40e_veb_get_bw_info(veb);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "Failed getting veb bw config, aq_err=%d\n",
-                        pf->hw.aq.asq_last_status);
+                        "Failed getting veb bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 
 out:
@@ -4881,8 +5175,9 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
        ret = i40e_aq_resume_port_tx(hw, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "AQ command Resume Port Tx failed = %d\n",
-                         pf->hw.aq.asq_last_status);
+                        "Resume Port Tx failed, err %s aq_err %s\n",
+                         i40e_stat_str(&pf->hw, ret),
+                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                /* Schedule PF reset to recover */
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                i40e_service_event_schedule(pf);
@@ -4919,7 +5214,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 
                        if (pf->flags & I40E_FLAG_MFP_ENABLED)
                                goto out;
-
                } else {
                        /* When status is not DISABLED then DCBX in FW */
                        pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
@@ -4934,8 +5228,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                }
        } else {
                dev_info(&pf->pdev->dev,
-                        "AQ Querying DCB configuration failed: aq_err %d\n",
-                        pf->hw.aq.asq_last_status);
+                        "Query for DCB configuration failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 
 out:
@@ -4949,10 +5244,15 @@ out:
  * i40e_print_link_message - print link up or down
  * @vsi: the VSI for which link needs a message
  */
-static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 {
-       char speed[SPEED_SIZE] = "Unknown";
-       char fc[FC_SIZE] = "RX/TX";
+       char *speed = "Unknown";
+       char *fc = "Unknown";
+
+       if (vsi->current_isup == isup)
+               return;
+
+       vsi->current_isup = isup;
 
        if (!isup) {
                netdev_info(vsi->netdev, "NIC Link is Down\n");
@@ -4970,19 +5270,19 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 
        switch (vsi->back->hw.phy.link_info.link_speed) {
        case I40E_LINK_SPEED_40GB:
-               strncpy(speed, "40 Gbps", SPEED_SIZE);
+               speed = "40 G";
                break;
        case I40E_LINK_SPEED_20GB:
-               strncpy(speed, "20 Gbps", SPEED_SIZE);
+               speed = "20 G";
                break;
        case I40E_LINK_SPEED_10GB:
-               strncpy(speed, "10 Gbps", SPEED_SIZE);
+               speed = "10 G";
                break;
        case I40E_LINK_SPEED_1GB:
-               strncpy(speed, "1000 Mbps", SPEED_SIZE);
+               speed = "1000 M";
                break;
        case I40E_LINK_SPEED_100MB:
-               strncpy(speed, "100 Mbps", SPEED_SIZE);
+               speed = "100 M";
                break;
        default:
                break;
@@ -4990,20 +5290,20 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 
        switch (vsi->back->hw.fc.current_mode) {
        case I40E_FC_FULL:
-               strncpy(fc, "RX/TX", FC_SIZE);
+               fc = "RX/TX";
                break;
        case I40E_FC_TX_PAUSE:
-               strncpy(fc, "TX", FC_SIZE);
+               fc = "TX";
                break;
        case I40E_FC_RX_PAUSE:
-               strncpy(fc, "RX", FC_SIZE);
+               fc = "RX";
                break;
        default:
-               strncpy(fc, "None", FC_SIZE);
+               fc = "None";
                break;
        }
 
-       netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+       netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
                    speed, fc);
 }
 
@@ -5052,7 +5352,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
                pf->fd_add_err = pf->fd_atr_cnt = 0;
                if (pf->fd_tcp_rule > 0) {
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-                       dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
                        pf->fd_tcp_rule = 0;
                }
                i40e_fdir_filter_restore(vsi);
@@ -5168,7 +5469,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
 
        /* Generate TC map for number of tc requested */
        for (i = 0; i < tc; i++)
-               enabled_tc |= (1 << i);
+               enabled_tc |= BIT_ULL(i);
 
        /* Requesting same TC configuration as already enabled */
        if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -5310,7 +5611,7 @@ err_setup_rx:
 err_setup_tx:
        i40e_vsi_free_tx_resources(vsi);
        if (vsi == pf->vsi[pf->lan_vsi])
-               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
        return err;
 }
@@ -5378,7 +5679,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                i40e_vc_notify_reset(pf);
 
        /* do the biggest reset indicated */
-       if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+       if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
 
                /* Request a Global Reset
                 *
@@ -5393,7 +5694,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 
-       } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
 
                /* Request a Core Reset
                 *
@@ -5405,7 +5706,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
                i40e_flush(&pf->hw);
 
-       } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
 
                /* Request a PF Reset
                 *
@@ -5418,7 +5719,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                dev_dbg(&pf->pdev->dev, "PFR requested\n");
                i40e_handle_reset_warning(pf);
 
-       } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;
 
                /* Find the VSI(s) that requested a re-init */
@@ -5426,22 +5727,21 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                         "VSI reinit requested\n");
                for (v = 0; v < pf->num_alloc_vsi; v++) {
                        struct i40e_vsi *vsi = pf->vsi[v];
+
                        if (vsi != NULL &&
                            test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
                                i40e_vsi_reinit_locked(pf->vsi[v]);
                                clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
                        }
                }
-
-               /* no further action needed, so return now */
-               return;
-       } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
                int v;
 
                /* Find the VSI(s) that needs to be brought down */
                dev_info(&pf->pdev->dev, "VSI down requested\n");
                for (v = 0; v < pf->num_alloc_vsi; v++) {
                        struct i40e_vsi *vsi = pf->vsi[v];
+
                        if (vsi != NULL &&
                            test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
                                set_bit(__I40E_DOWN, &vsi->state);
@@ -5449,13 +5749,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                                clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                        }
                }
-
-               /* no further action needed, so return now */
-               return;
        } else {
                dev_info(&pf->pdev->dev,
                         "bad reset request 0x%08x\n", reset_flags);
-               return;
        }
 }
 
@@ -5524,8 +5820,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
                dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
        }
 
-       dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
-                need_reconfig);
+       dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
        return need_reconfig;
 }
 
@@ -5552,16 +5847,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        /* Ignore if event is not for Nearest Bridge */
        type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
                & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
-       dev_dbg(&pf->pdev->dev,
-               "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
+       dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
        if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
                return ret;
 
        /* Check MIB Type and return if event for Remote MIB update */
        type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
        dev_dbg(&pf->pdev->dev,
-               "%s: LLDP event mib type %s\n", __func__,
-               type ? "remote" : "local");
+               "LLDP event mib type %s\n", type ? "remote" : "local");
        if (type == I40E_AQ_LLDP_MIB_REMOTE) {
                /* Update the remote cached instance and return */
                ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
@@ -5570,16 +5863,18 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
                goto exit;
        }
 
-       memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
        /* Store the old configuration */
-       memcpy(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg));
+       tmp_dcbx_cfg = hw->local_dcbx_config;
 
        /* Reset the old DCBx configuration data */
        memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
        /* Get updated DCBX data from firmware */
        ret = i40e_get_dcb_config(&pf->hw);
        if (ret) {
-               dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
+               dev_info(&pf->pdev->dev,
+                        "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto exit;
        }
 
@@ -5691,6 +5986,7 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
 {
        u32 val, fcnt_prog;
+
        val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
        fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
        return fcnt_prog;
@@ -5703,6 +5999,7 @@ u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
 {
        u32 val, fcnt_prog;
+
        val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
        fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
                    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
@@ -5717,6 +6014,7 @@ u32 i40e_get_current_fd_count(struct i40e_pf *pf)
 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
 {
        u32 val, fcnt_prog;
+
        val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
        fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
                    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
@@ -5730,7 +6028,9 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
  **/
 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 {
+       struct i40e_fdir_filter *filter;
        u32 fcnt_prog, fcnt_avail;
+       struct hlist_node *node;
 
        if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
                return;
@@ -5746,7 +6046,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
-                       dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
                }
        }
        /* Wait for some more space to be available to turn on ATR */
@@ -5754,7 +6055,20 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-                       dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+               }
+       }
+
+       /* if hw had a problem adding a filter, delete it */
+       if (pf->fd_inv > 0) {
+               hlist_for_each_entry_safe(filter, node,
+                                         &pf->fdir_filter_list, fdir_node) {
+                       if (filter->fd_id == pf->fd_inv) {
+                               hlist_del(&filter->fdir_node);
+                               kfree(filter);
+                               pf->fdir_pf_active_filters--;
+                       }
                }
        }
 }
@@ -5776,47 +6090,49 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
        if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
                return;
 
-       if (time_after(jiffies, pf->fd_flush_timestamp +
-                               (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
+       if (!time_after(jiffies, pf->fd_flush_timestamp +
+                                (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
+               return;
 
-               /* If the flush is happening too quick and we have mostly
-                * SB rules we should not re-enable ATR for some time.
-                */
-               min_flush_time = pf->fd_flush_timestamp
-                               + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
-               fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+       /* If the flush is happening too quick and we have mostly SB rules we
+        * should not re-enable ATR for some time.
+        */
+       min_flush_time = pf->fd_flush_timestamp +
+                        (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+       fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
 
-               if (!(time_after(jiffies, min_flush_time)) &&
-                   (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+       if (!(time_after(jiffies, min_flush_time)) &&
+           (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+               if (I40E_DEBUG_FD & pf->hw.debug_mask)
                        dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
-                       disable_atr = true;
-               }
+               disable_atr = true;
+       }
 
-               pf->fd_flush_timestamp = jiffies;
-               pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-               /* flush all filters */
-               wr32(&pf->hw, I40E_PFQF_CTL_1,
-                    I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
-               i40e_flush(&pf->hw);
-               pf->fd_flush_cnt++;
-               pf->fd_add_err = 0;
-               do {
-                       /* Check FD flush status every 5-6msec */
-                       usleep_range(5000, 6000);
-                       reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
-                       if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
-                               break;
-               } while (flush_wait_retry--);
-               if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
-                       dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
-               } else {
-                       /* replay sideband filters */
-                       i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-                       if (!disable_atr)
-                               pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-                       clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+       pf->fd_flush_timestamp = jiffies;
+       pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+       /* flush all filters */
+       wr32(&pf->hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+       i40e_flush(&pf->hw);
+       pf->fd_flush_cnt++;
+       pf->fd_add_err = 0;
+       do {
+               /* Check FD flush status every 5-6msec */
+               usleep_range(5000, 6000);
+               reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
+               if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+                       break;
+       } while (flush_wait_retry--);
+
+       if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
+               dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
+       } else {
+               /* replay sideband filters */
+               i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+               if (!disable_atr)
+                       pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+               clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+               if (I40E_DEBUG_FD & pf->hw.debug_mask)
                        dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
-               }
        }
 }
 
@@ -5924,15 +6240,23 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
  **/
 static void i40e_link_event(struct i40e_pf *pf)
 {
-       bool new_link, old_link;
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        u8 new_link_speed, old_link_speed;
+       i40e_status status;
+       bool new_link, old_link;
 
        /* set this to force the get_link_status call to refresh state */
        pf->hw.phy.get_link_info = true;
 
        old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
-       new_link = i40e_get_link_status(&pf->hw);
+
+       status = i40e_get_link_status(&pf->hw, &new_link);
+       if (status != I40E_SUCCESS) {
+               dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
+                       status);
+               return;
+       }
+
        old_link_speed = pf->hw.phy.link_info_old.link_speed;
        new_link_speed = pf->hw.phy.link_info.link_speed;
 
@@ -6044,7 +6368,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
        pf->service_timer_previous = jiffies;
 
        i40e_check_hang_subtask(pf);
-       i40e_link_event(pf);
+       if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
+               i40e_link_event(pf);
 
        /* Update the stats for active netdevs so the network stack
         * can look at updated numbers whenever it cares to
@@ -6053,10 +6378,12 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
                if (pf->vsi[i] && pf->vsi[i]->netdev)
                        i40e_update_stats(pf->vsi[i]);
 
-       /* Update the stats for the active switching components */
-       for (i = 0; i < I40E_MAX_VEB; i++)
-               if (pf->veb[i])
-                       i40e_update_veb_stats(pf->veb[i]);
+       if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+               /* Update the stats for the active switching components */
+               for (i = 0; i < I40E_MAX_VEB; i++)
+                       if (pf->veb[i])
+                               i40e_update_veb_stats(pf->veb[i]);
+       }
 #ifdef HAVE_PTP_1588_CLOCK
 
        i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
@@ -6073,23 +6400,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 
        rtnl_lock();
        if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
                clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
                clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
                clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
                clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
        }
 
@@ -6122,11 +6449,9 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
        struct i40e_hw *hw = &pf->hw;
        struct i40e_aqc_get_link_status *status =
                (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
-       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 
        /* save off old link status information */
-       memcpy(&pf->hw.phy.link_info_old, hw_link_info,
-              sizeof(pf->hw.phy.link_info_old));
+       hw->phy.link_info_old = hw->phy.link_info;
 
        /* Do a new status request to re-enable LSE reporting
         * and load new status information into the hw struct
@@ -6297,27 +6622,29 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
 {
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        struct i40e_vsi_context ctxt;
-       int aq_ret;
+       int ret;
 
        ctxt.seid = pf->main_vsi_seid;
        ctxt.pf_num = pf->hw.pf_id;
        ctxt.vf_num = 0;
-       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-                        __func__, aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return;
        }
        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
        ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s: update vsi switch failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vsi switch failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 }
 
@@ -6331,27 +6658,29 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
 {
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        struct i40e_vsi_context ctxt;
-       int aq_ret;
+       int ret;
 
        ctxt.seid = pf->main_vsi_seid;
        ctxt.pf_num = pf->hw.pf_id;
        ctxt.vf_num = 0;
-       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-                        __func__, aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return;
        }
        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
        ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s: update vsi switch failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vsi switch failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 }
 
@@ -6368,13 +6697,19 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb)
        struct i40e_pf *pf = veb->pf;
 
 #ifdef HAVE_BRIDGE_ATTRIBS
-       dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
-                veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+       if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+               dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+                        veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
        if (veb->bridge_mode & BRIDGE_MODE_VEPA)
                i40e_disable_pf_switch_lb(pf);
        else
-#endif
                i40e_enable_pf_switch_lb(pf);
+#else
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               i40e_enable_pf_switch_lb(pf);
+       else
+               i40e_disable_pf_switch_lb(pf);
+#endif
 }
 
 /**
@@ -6413,7 +6748,8 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
        ret = i40e_add_vsi(ctl_vsi);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "rebuild of owner VSI failed: %d\n", ret);
+                        "rebuild of veb_idx %d owner VSI failed: %d\n",
+                        veb->idx, ret);
                goto end_reconstitute;
        }
        i40e_vsi_reset_stats(ctl_vsi);
@@ -6438,6 +6774,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
 
                if (pf->vsi[v]->veb_idx == veb->idx) {
                        struct i40e_vsi *vsi = pf->vsi[v];
+
                        vsi->uplink_seid = veb->seid;
                        ret = i40e_add_vsi(vsi);
                        if (ret) {
@@ -6494,18 +6831,14 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
                        buf_len = data_size;
                } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
                        dev_info(&pf->pdev->dev,
-                                "capability discovery failed: aq=%d\n",
-                                pf->hw.aq.asq_last_status);
+                                "capability discovery failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, err),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
                        return -ENODEV;
                }
        } while (err);
 
-       if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
-           (pf->hw.aq.fw_maj_ver < 2)) {
-               pf->hw.func_caps.num_msix_vectors++;
-               pf->hw.func_caps.num_msix_vectors_vf++;
-       }
-
        if (pf->hw.debug_mask & I40E_DEBUG_USER)
                dev_info(&pf->pdev->dev,
                         "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -6609,7 +6942,6 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
                        dev_warn(&pf->pdev->dev,
                                 "shutdown_lan_hmc failed: %d\n", ret);
        }
-       return;
 }
 
 /**
@@ -6659,23 +6991,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
        ret = i40e_init_adminq(&pf->hw);
        if (ret) {
-               dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+               dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto clear_recovery;
        }
 
        /* re-verify the eeprom if we just had an EMP reset */
-       if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) {
-               clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
+       if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
                i40e_verify_eeprom(pf);
-       }
 
        i40e_clear_pxe_mode(hw);
        ret = i40e_get_capabilities(pf);
-       if (ret) {
-               dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
-                        ret);
+       if (ret)
                goto end_core_reset;
-       }
 
        ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
                                hw->func_caps.num_rx_qp,
@@ -6700,10 +7029,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 
 #endif /* CONFIG_DCB */
 #ifdef I40E_FCOE
-       ret = i40e_init_pf_fcoe(pf);
-       if (ret) {
-               dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
-       }
+       i40e_init_pf_fcoe(pf);
 
 #endif
        /* do basic switch setup */
@@ -6718,12 +7044,16 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                                       I40E_AQ_EVENT_LINK_UPDOWN |
                                       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
        if (ret)
-               dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+               dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        /* make sure our flow control settings are restored */
        ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
        if (ret)
-               dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+               dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        /* Rebuild the VSIs and VEBs that existed before reset.
         * They are still in our local switch element arrays, so only
@@ -6784,13 +7114,24 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                msleep(75);
                ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (ret)
-                       dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-                                pf->hw.aq.asq_last_status);
+                       dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
        }
        /* reinit the misc interrupt */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                ret = i40e_setup_misc_vector(pf);
 
+       /* Add a filter to drop all Flow control frames from any VSI from being
+        * transmitted. By doing so we stop a malicious VF from sending out
+        * PAUSE or PFC frames and potentially controlling traffic for other
+        * PF/VF VSIs.
+        * The FW can still send Flow control frames if enabled.
+        */
+       i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+                                                       pf->main_vsi_seid);
+
        /* restart the VSIs that were rebuilt and running before the reset */
        i40e_pf_unquiesce_all_vsi(pf);
 
@@ -6919,58 +7260,164 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
                }
        }
 
-       /* re-enable mdd interrupt cause */
-       clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
-       reg = rd32(hw, I40E_PFINT_ICR0_ENA);
-       reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
-       wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-       i40e_flush(hw);
+       /* re-enable mdd interrupt cause */
+       clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+       reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+       reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+       wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+       i40e_flush(hw);
+}
+
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+/**
+ * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       i40e_status ret;
+       __be16 port;
+       int i;
+
+       if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+               return;
+
+       pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+
+       for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+               if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
+                       pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
+                       port = pf->vxlan_ports[i];
+                       if (port)
+                               ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
+                                                    I40E_AQC_TUNNEL_TYPE_VXLAN,
+                                                    NULL, NULL);
+                       else
+                               ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
+
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
+                                        port ? "add" : "delete",
+                                        ntohs(port), i,
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
+                               pf->vxlan_ports[i] = 0;
+                       } else {
+                               dev_info(&pf->pdev->dev,
+                                        "%s vxlan port %d, index %d success\n",
+                                        port ? "add" : "delete",
+                                        ntohs(port), i);
+                       }
+               }
+       }
+}
+
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
+/**
+ * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
+ * @q_idx: TX queue number
+ * @vsi: Pointer to VSI struct
+ *
+ * This function checks specified queue for given VSI. Detects hung condition.
+ * Sets hung bit since it is two step process. Before next run of service task
+ * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
+ * hung condition remain unchanged and during subsequent run, this function
+ * issues SW interrupt to recover from hung condition.
+ **/
+static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
+{
+       struct i40e_ring *tx_ring = NULL;
+       struct i40e_pf  *pf;
+       u32 head, val, tx_pending;
+       int i;
+
+       pf = vsi->back;
+
+       /* now that we have an index, find the tx_ring struct */
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+                       if (q_idx == vsi->tx_rings[i]->queue_index) {
+                               tx_ring = vsi->tx_rings[i];
+                               break;
+                       }
+               }
+       }
+
+       if (!tx_ring)
+               return;
+
+       /* Read interrupt register */
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+               val = rd32(&pf->hw,
+                    I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+                                        tx_ring->vsi->base_vector - 1));
+       else
+               val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+       head = i40e_get_head(tx_ring);
+
+       tx_pending = i40e_get_tx_pending(tx_ring);
+
+       /* Interrupts are disabled and TX pending is non-zero,
+        * trigger the SW interrupt (don't wait). Worst case
+        * there will be one extra interrupt which may result
+        * into not cleaning any queues because queues are cleaned.
+        */
+       if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+               i40e_force_wb(vsi, tx_ring->q_vector);
 }
 
-#ifdef HAVE_VXLAN_RX_OFFLOAD
 /**
- * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
- * @pf: board private structure
+ * i40e_detect_recover_hung - Function to detect and recover hung_queues
+ * @pf:  pointer to PF struct
+ *
+ * LAN VSI has netdev and netdev has TX queues. This function is to check
+ * each of those TX queues if they are hung, trigger recovery by issuing
+ * SW interrupt.
  **/
-static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+static void i40e_detect_recover_hung(struct i40e_pf *pf)
 {
-       struct i40e_hw *hw = &pf->hw;
-       i40e_status ret;
-       u8 filter_index;
-       __be16 port;
+       struct net_device *netdev;
+       struct i40e_vsi *vsi;
        int i;
 
-       if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+       /* Only for LAN VSI */
+       vsi = pf->vsi[pf->lan_vsi];
+
+       if (!vsi)
                return;
 
-       pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+       /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
+       if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+           test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+               return;
 
-       for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-               if (pf->pending_vxlan_bitmap & (1 << i)) {
-                       pf->pending_vxlan_bitmap &= ~(1 << i);
-                       port = pf->vxlan_ports[i];
-                       ret = port ?
-                             i40e_aq_add_udp_tunnel(hw, ntohs(port),
-                                                    I40E_AQC_TUNNEL_TYPE_VXLAN,
-                                                    &filter_index, NULL)
-                             : i40e_aq_del_udp_tunnel(hw, i, NULL);
+       /* Make sure type is MAIN VSI */
+       if (vsi->type != I40E_VSI_MAIN)
+               return;
 
-                       if (ret) {
-                               dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
-                                        port ? "adding" : "deleting",
-                                        ntohs(port), port ? i : i);
+       netdev = vsi->netdev;
+       if (!netdev)
+               return;
 
-                               pf->vxlan_ports[i] = 0;
-                       } else {
-                               dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
-                                        port ? "Added" : "Deleted",
-                                        ntohs(port), port ? i : filter_index);
-                       }
-               }
+       /* Bail out if netif_carrier is not OK */
+       if (!netif_carrier_ok(netdev))
+               return;
+
+       /* Go thru' TX queues for netdev */
+       for (i = 0; i < netdev->num_tx_queues; i++) {
+               struct netdev_queue *q;
+
+               q = netdev_get_tx_queue(netdev, i);
+               if (q)
+                       i40e_detect_recover_hung_queue(i, vsi);
        }
 }
 
-#endif /* HAVE_VXLAN_RX_OFFLOAD */
 /**
  * i40e_service_task - Run the driver's async subtasks
  * @work: pointer to work_struct containing our data
@@ -6983,21 +7430,24 @@ static void i40e_service_task(struct work_struct *work)
        unsigned long start_time = jiffies;
 
        /* don't bother with service tasks if a reset is in progress */
-       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+           test_bit(__I40E_SUSPENDED, &pf->state)) {
                i40e_service_event_complete(pf);
                return;
        }
 
+       i40e_detect_recover_hung(pf);
+       i40e_sync_filters_subtask(pf);
        i40e_reset_subtask(pf);
        i40e_handle_mdd_event(pf);
        i40e_vc_process_vflr_event(pf);
        i40e_watchdog_subtask(pf);
        i40e_fdir_reinit_subtask(pf);
-       i40e_sync_filters_subtask(pf);
 #ifdef HAVE_VXLAN_RX_OFFLOAD
        i40e_sync_vxlan_filters_subtask(pf);
 
-#endif
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
        i40e_clean_adminq_subtask(pf);
 
        i40e_service_event_complete(pf);
@@ -7172,6 +7622,7 @@ int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        vsi->idx = vsi_idx;
        vsi->rx_itr_setting = pf->rx_itr_default;
        vsi->tx_itr_setting = pf->tx_itr_default;
+       vsi->int_rate_limit = 0;
        vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
                                                        pf->rss_table_size : 64;
        vsi->netdev_registered = false;
@@ -7192,6 +7643,8 @@ int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        /* Setup default MSIX irq handler for VSI */
        i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
 
+       /* Initialize VSI lock */
+       spin_lock_init(&vsi->mac_filter_list_lock);
        pf->vsi[vsi_idx] = vsi;
        ret = vsi_idx;
        goto unlock_pf;
@@ -7366,13 +7819,12 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
                                 "MSI-X vector reservation failed: %d\n", err);
                        vectors = 0;
                        break;
-               } else {
-                       /* err > 0 is the hint for retry */
-                       dev_info(&pf->pdev->dev,
-                                "MSI-X vectors wanted %d, retrying with %d\n",
-                                vectors, err);
-                       vectors = err;
                }
+               /* err > 0 is the hint for retry */
+               dev_info(&pf->pdev->dev,
+                        "MSI-X vectors wanted %d, retrying with %d\n",
+                        vectors, err);
+               vectors = err;
        }
 
        if (vectors > 0 && vectors < I40E_MIN_MSIX) {
@@ -7649,7 +8101,7 @@ err_out:
  * i40e_init_interrupt_scheme - Determine proper interrupt scheme
  * @pf: board private structure to initialize
  **/
-static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
+static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 {
        int vectors = 0;
        ssize_t size;
@@ -7699,11 +8151,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
        /* set up vector assignment tracking */
        size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
        pf->irq_pile = kzalloc(size, GFP_KERNEL);
+       if (!pf->irq_pile) {
+               dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+               return -ENOMEM;
+       }
        pf->irq_pile->num_entries = vectors;
        pf->irq_pile->search_hint = 0;
 
-       /* track first vector for misc interrupts */
+       /* track first vector for misc interrupts, ignore return */
        (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+
+       return 0;
 }
 
 /**
@@ -7747,66 +8205,70 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
 }
 
 /**
- * i40e_config_rss - Prepare for RSS if used
+ * i40e_config_rss_reg - Prepare for RSS if used
  * @pf: board private structure
+ * @seed: RSS hash seed
  **/
-static int i40e_config_rss(struct i40e_pf *pf)
+static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
 {
-       /* Set of random keys generated using kernel random number generator */
-       static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
-                               0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
-                               0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
-                               0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
-       struct i40e_hw *hw = &pf->hw;
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
-       u32 reg_val;
+       struct i40e_hw *hw = &pf->hw;
+       u32 *seed_dw = (u32 *)seed;
+       u32 current_queue = 0;
        u32 lut = 0;
        int i, j;
-       u64 hena;
 
        /* Fill out hash function seed */
        for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-               wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
+               wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+
+       for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               lut = 0;
+               for (j = 0; j < 4; j++) {
+                       if (current_queue == vsi->rss_size)
+                               current_queue = 0;
+                       lut |= ((current_queue) << (8 * j));
+                       current_queue++;
+               }
+               wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
+       }
+       i40e_flush(hw);
+
+       return 0;
+}
+
+/**
+ * i40e_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_config_rss(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       u8 seed[I40E_HKEY_ARRAY_SIZE];
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_val;
+       u64 hena;
+
+       netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
 
        /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
        hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
                ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
-       hena |= I40E_DEFAULT_RSS_HENA;
+       hena |= i40e_pf_get_default_rss_hena(pf);
+
        wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
        wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
        vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
 
-       /* Check capability and Set table size and register per hw expectation*/
+       /* Determine the RSS table size based on the hardware capabilities */
        reg_val = rd32(hw, I40E_PFQF_CTL_0);
-       if (pf->rss_table_size == 512)
-               reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
-       else
-               reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+       reg_val = (pf->rss_table_size == 512) ?
+                       (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+                       (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
        wr32(hw, I40E_PFQF_CTL_0, reg_val);
 
-       /* Populate the LUT with max no. of queues in round robin fashion */
-       for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
-
-               /* The assumption is that lan qp count will be the highest
-                * qp count for any PF VSI that needs RSS.
-                * If multiple VSIs need RSS support, all the qp counts
-                * for those VSIs should be a power of 2 for RSS to work.
-                * If LAN VSI is the only consumer for RSS then this requirement
-                * is not necessary.
-                */
-               if (j == vsi->rss_size)
-                       j = 0;
-               /* lut = 4-byte sliding window of 4 lut entries */
-               lut = (lut << 8) | (j &
-                        ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
-               /* On i = 3, we have 4 entries in lut; write to the register */
-               if ((i & 3) == 3)
-                       wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
-       }
-       i40e_flush(hw);
-
-       return 0;
+       return i40e_config_rss_reg(pf, seed);
 }
 
 /**
@@ -7841,10 +8303,10 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
 }
 
 /**
- * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
+ * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
  * @pf: board private structure
  **/
-i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
 {
        i40e_status status;
        bool min_valid, max_valid;
@@ -7855,27 +8317,27 @@ i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
 
        if (!status) {
                if (min_valid)
-                       pf->npar_min_bw = min_bw;
+                       pf->min_bw = min_bw;
                if (max_valid)
-                       pf->npar_max_bw = max_bw;
+                       pf->max_bw = max_bw;
        }
 
        return status;
 }
 
 /**
- * i40e_set_npar_bw_setting - Set BW settings for this PF partition
+ * i40e_set_partition_bw_setting - Set BW settings for this PF partition
  * @pf: board private structure
  **/
-i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
 {
        struct i40e_aqc_configure_partition_bw_data bw_data;
        i40e_status status;
 
        /* Set the valid bit for this PF */
-       bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
-       bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
-       bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
+       bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
+       bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+       bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
 
        /* Set the new bandwidths */
        status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
@@ -7884,10 +8346,10 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
 }
 
 /**
- * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
+ * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
  * @pf: board private structure
  **/
-i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
 {
        /* Commit temporary BW setting to permanent NVM image */
        enum i40e_admin_queue_err last_aq_status;
@@ -7907,8 +8369,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        last_aq_status = pf->hw.aq.asq_last_status;
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "Cannot acquire NVM for read access, err %d: aq_err %d\n",
-                        ret, last_aq_status);
+                        "Cannot acquire NVM for read access, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
                goto bw_commit_out;
        }
 
@@ -7923,8 +8386,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        last_aq_status = pf->hw.aq.asq_last_status;
        i40e_release_nvm(&pf->hw);
        if (ret) {
-               dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
-                        ret, last_aq_status);
+               dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
                goto bw_commit_out;
        }
 
@@ -7936,8 +8400,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        last_aq_status = pf->hw.aq.asq_last_status;
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "Cannot acquire NVM for write access, err %d: aq_err %d\n",
-                        ret, last_aq_status);
+                        "Cannot acquire NVM for write access, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
                goto bw_commit_out;
        }
        /* Write it back out unchanged to initiate update NVM,
@@ -7955,8 +8420,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        i40e_release_nvm(&pf->hw);
        if (ret)
                dev_info(&pf->pdev->dev,
-                        "BW settings NOT SAVED, err %d aq_err %d\n",
-                        ret, last_aq_status);
+                        "BW settings NOT SAVED, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
 bw_commit_out:
 
        return ret;
@@ -7992,6 +8458,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
        /* Set default capability flags */
        pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
                    I40E_FLAG_MSI_ENABLED     |
+                   I40E_FLAG_LINK_POLLING_ENABLED |
                    I40E_FLAG_MSIX_ENABLED;
 
 #ifdef HAVE_IOMMU_PRESENT
@@ -8007,7 +8474,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
        /* Depending on PF configurations, it is possible that the RSS
         * maximum might end up larger than the available queues
         */
-       pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+       pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
        pf->rss_size = 1;
        pf->rss_table_size = pf->hw.func_caps.rss_table_size;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
@@ -8018,32 +8485,33 @@ static int i40e_sw_init(struct i40e_pf *pf)
                pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
        }
        /* MFP mode enabled */
-       if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+       if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
                pf->flags |= I40E_FLAG_MFP_ENABLED;
+
                dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
-               if (i40e_get_npar_bw_setting(pf))
+               if (i40e_get_partition_bw_setting(pf)) {
                        dev_warn(&pf->pdev->dev,
-                                "Could not get NPAR bw settings\n");
-               else
+                                "Could not get partition bw settings\n");
+               } else {
                        dev_info(&pf->pdev->dev,
-                                "Min BW = %8.8x, Max BW = %8.8x\n",
-                                pf->npar_min_bw, pf->npar_max_bw);
+                                "Partition BW Min = %8.8x, Max = %8.8x\n",
+                                pf->min_bw, pf->max_bw);
+
+                       /* nudge the Tx scheduler */
+                       i40e_set_partition_bw_setting(pf);
+               }
        }
 
        if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
            (pf->hw.func_caps.fd_filters_best_effort > 0)) {
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-               /* Setup a counter for fd_atr per PF */
-               pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
-               if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
-                       pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-                       /* Setup a counter for fd_sb per PF */
-                       pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
-               } else {
+               if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+                   pf->hw.num_partitions > 1)
                        dev_info(&pf->pdev->dev,
                                 "Flow Director Sideband mode Disabled in MFP mode\n");
-               }
+               else
+                       pf->flags |= I40E_FLAG_FD_SB_ENABLED;
                pf->fdir_pf_filter_count =
                                 pf->hw.func_caps.fd_filters_guaranteed;
                pf->hw.fdir_shared_filter_count =
@@ -8053,13 +8521,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
        if (pf->hw.func_caps.vmdq) {
                pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
                pf->flags |= I40E_FLAG_VMDQ_ENABLED;
-               pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+               pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
        }
 
 #ifdef I40E_FCOE
-       err = i40e_init_pf_fcoe(pf);
-       if (err)
-               dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
+       i40e_init_pf_fcoe(pf);
 
 #endif /* I40E_FCOE */
 #ifdef CONFIG_PCI_IOV
@@ -8095,6 +8561,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
        pf->lan_veb = I40E_NO_VEB;
        pf->lan_vsi = I40E_NO_VSI;
 
+       /* By default FW has this off for performance reasons */
+       pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
        /* set up queue assignment tracking */
        size = sizeof(struct i40e_lump_tracking)
                + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
@@ -8110,10 +8579,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
 
        mutex_init(&pf->switch_mutex);
 
-       /* If NPAR is enabled nudge the Tx scheduler */
-       if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
-               i40e_set_npar_bw_setting(pf);
-
 sw_init_done:
        return err;
 }
@@ -8151,7 +8616,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
                pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
                pf->fdir_pf_active_filters = 0;
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-               dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                       dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
                /* if ATR was auto disabled it can be re-enabled. */
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
@@ -8166,8 +8632,12 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
  * @netdev: ptr to the netdev being adjusted
  * @features: the feature set that the stack is suggesting
  **/
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+static int i40e_set_features(struct net_device *netdev, u32 features)
+#else
 static int i40e_set_features(struct net_device *netdev,
                             netdev_features_t features)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -8186,7 +8656,7 @@ static int i40e_set_features(struct net_device *netdev,
        need_reset = i40e_set_ntuple(pf, features);
 
        if (need_reset)
-               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
        return 0;
 }
@@ -8234,7 +8704,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
 
        /* Check if port already exists */
        if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
-               netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+               netdev_info(netdev, "vxlan port %d already offloaded\n",
+                           ntohs(port));
                return;
        }
 
@@ -8242,15 +8713,14 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
        next_idx = i40e_get_vxlan_port_idx(pf, 0);
 
        if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
-               netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+               netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
                            ntohs(port));
                return;
        }
 
        /* New port: add it and mark its index in the bitmap */
        pf->vxlan_ports[next_idx] = port;
-       pf->pending_vxlan_bitmap |= (1 << next_idx);
-
+       pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
        pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 }
 
@@ -8279,12 +8749,10 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
                 * and make it pending
                 */
                pf->vxlan_ports[idx] = 0;
-
-               pf->pending_vxlan_bitmap |= (1 << idx);
-
+               pf->pending_vxlan_bitmap |= BIT_ULL(idx);
                pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
        } else {
-               netdev_warn(netdev, "Port %d was not found, not deleting\n",
+               netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
                            ntohs(port));
        }
 }
@@ -8357,20 +8825,81 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm,
        return err;
 }
 
+#ifdef HAVE_NDO_FEATURES_CHECK
+#define I40E_MAX_TUNNEL_HDR_LEN 80
+/**
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @netdev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+                                            struct net_device *dev,
+                                            netdev_features_t features)
+{
+       u8 protocol = 0;
+
+       if (!skb->encapsulation)
+               return features;
+
+       /* prevent tunnel headers that are too long to offload from
+        * being sent to the hardware
+        */
+       if (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+           I40E_MAX_TUNNEL_HDR_LEN)
+               return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+       /* this is a somewhat temporary patch to prevent the driver
+        * from trying to offload tunnels it cannot support
+        * currently the only supported tunnel is VxLAN,
+        * this code looks like vxlan_features_check but is not
+        * the same.
+        */
+
+       switch (vlan_get_protocol(skb)) {
+       case htons(ETH_P_IP):
+               protocol = ip_hdr(skb)->protocol;
+               break;
+       case htons(ETH_P_IPV6):
+               protocol = ipv6_hdr(skb)->nexthdr;
+               break;
+       default:
+               return features;
+       }
+
+       if ((protocol != IPPROTO_UDP) ||
+           (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+            skb->inner_protocol != htons(ETH_P_TEB) ||
+            (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+             sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+               return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+       return features;
+}
+
+#endif /* HAVE_NDO_FEATURES_CHECK */
 #ifndef USE_DEFAULT_FDB_DEL_DUMP
 #ifdef USE_CONST_DEV_UC_CHAR
+#ifdef HAVE_NDO_FDB_ADD_VID
 static int i40e_ndo_fdb_del(struct ndmsg *ndm,
                             struct net_device *dev,
                             const unsigned char *addr,
-#ifdef HAVE_NDO_FDB_ADD_VID
                             u16 vid)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+                            struct net_device *dev,
+                            const unsigned char *addr)
 #endif
 #else
+#ifdef HAVE_NDO_FDB_ADD_VID
 static int i40e_ndo_fdb_del(struct ndmsg *ndm,
                             struct net_device *dev,
                             unsigned char *addr,
-#ifdef HAVE_NDO_FDB_ADD_VID
                             u16 vid)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+                            struct net_device *dev,
+                            unsigned char *addr)
 #endif
 #endif
 {
@@ -8423,8 +8952,14 @@ static int i40e_ndo_fdb_dump(struct sk_buff *skb,
  * allow rebuild of the components with required hardware
  * bridge mode enabled.
  **/
+#ifdef HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+static int i40e_ndo_bridge_setlink(struct net_device *dev,
+                                  struct nlmsghdr *nlh,
+                                  u16 flags)
+#else
 static int i40e_ndo_bridge_setlink(struct net_device *dev,
                                   struct nlmsghdr *nlh)
+#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
@@ -8475,7 +9010,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
                                pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
                        else
                                pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-                       i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+                       i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
                        break;
                }
        }
@@ -8490,18 +9025,24 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
  * @seq: RTNL message seq #
  * @dev: the netdev being configured
  * @filter_mask: unused
+ * @nlflags: netlink flags passed in
  *
  * Return the mode in which the hardware bridge is operating in
  * i.e VEB or VEPA.
  **/
-#ifdef HAVE_BRIDGE_FILTER
+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                  struct net_device *dev,
+                                  u32 __always_unused filter_mask,
+                                  int nlflags)
+#elif defined(HAVE_BRIDGE_FILTER)
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                   struct net_device *dev,
                                   u32 __always_unused filter_mask)
 #else
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                   struct net_device *dev)
-#endif /* HAVE_BRIDGE_FILTER */
+#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
@@ -8522,11 +9063,19 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        if (!veb)
                return 0;
 
-#ifdef HAVE_NDO_FDB_ADD_VID
-       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, 0, 0);
+#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+                                      0, 0, nlflags, filter_mask, NULL);
+#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS)
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+                                      0, 0, nlflags);
+#elif defined(HAVE_NDO_FDB_ADD_VID) || \
+	defined(NDO_BRIDGE_GETLINK_HAS_FILTER_MASK_PARAM)
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+                                      0, 0);
 #else
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
-#endif
+#endif /* HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT */
 }
 #endif /* HAVE_BRIDGE_ATTRIBS */
 #endif /* HAVE_FDB_OPS */
@@ -8564,9 +9113,6 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_fcoe_enable        = i40e_fcoe_enable,
        .ndo_fcoe_disable       = i40e_fcoe_disable,
 #endif
-#ifdef HAVE_NDO_SET_FEATURES
-       .ndo_set_features       = i40e_set_features,
-#endif /* HAVE_NDO_SET_FEATURES */
 #ifdef IFLA_VF_MAX
        .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
@@ -8596,11 +9142,24 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_fdb_del            = i40e_ndo_fdb_del,
        .ndo_fdb_dump           = i40e_ndo_fdb_dump,
 #endif
+#ifdef HAVE_NDO_FEATURES_CHECK
+       .ndo_features_check     = i40e_features_check,
+#endif /* HAVE_NDO_FEATURES_CHECK */
 #ifdef HAVE_BRIDGE_ATTRIBS
        .ndo_bridge_getlink     = i40e_ndo_bridge_getlink,
        .ndo_bridge_setlink     = i40e_ndo_bridge_setlink,
 #endif /* HAVE_BRIDGE_ATTRIBS */
 #endif /* HAVE_FDB_OPS */
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+};
+
+/* RHEL6 keeps these operations in a separate structure */
+static const struct net_device_ops_ext i40e_netdev_ops_ext = {
+       .size                   = sizeof(struct net_device_ops_ext),
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#ifdef HAVE_NDO_SET_FEATURES
+       .ndo_set_features       = i40e_set_features,
+#endif /* HAVE_NDO_SET_FEATURES */
 };
 
 #endif /* HAVE_NET_DEVICE_OPS */
@@ -8616,6 +9175,9 @@ static void i40e_assign_netdev_ops(struct net_device *dev)
 {
 #ifdef HAVE_NET_DEVICE_OPS
        dev->netdev_ops = &i40e_netdev_ops;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+       set_netdev_ops_ext(dev, &i40e_netdev_ops_ext);
+#endif
 #else /* HAVE_NET_DEVICE_OPS */
        dev->open = i40e_open;
        dev->stop = i40e_close;
@@ -8661,6 +9223,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        struct net_device *netdev;
        u8 mac_addr[ETH_ALEN];
        int etherdev_size;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+       u32 hw_features;
+#endif
 
        etherdev_size = sizeof(struct i40e_netdev_priv);
        netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
@@ -8713,14 +9278,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                           NETIF_F_RXHASH              |
 #endif /* NETIF_F_RXHASH */
                           0;
-#ifdef ESX55
-       /*
-        * We want load balancer to call apply_filter for mac addresses applied
-        * to default queue to have successful VF to emulated communication
-        */
-       if ((vsi->type == I40E_VSI_MAIN) && (pf->num_req_vfs > 0))
-               netdev->features |= NETIF_F_DEFQ_L2_FLTR;
-#endif
 #if defined(HAVE_NDO_SET_FEATURES) || defined(ETHTOOL_GRXRINGS)
 
        if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
@@ -8729,7 +9286,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 
 #ifdef HAVE_NDO_SET_FEATURES
        /* copy netdev features into list of user selectable features */
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+       hw_features = get_netdev_hw_features(netdev);
+       hw_features |= netdev->features;
+       set_netdev_hw_features(netdev, hw_features);
+#else
        netdev->hw_features |= netdev->features;
+#endif
 #else
 #ifdef NETIF_F_GRO
        netdev->features |= NETIF_F_GRO;
@@ -8744,17 +9307,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                 * default a MAC-VLAN filter that accepts any tagged packet
                 * which must be replaced by a normal filter.
                 */
-               if (!i40e_rm_default_mac_filter(vsi, mac_addr))
+               if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
+                       spin_lock_bh(&vsi->mac_filter_list_lock);
                        i40e_add_filter(vsi, mac_addr,
                                        I40E_VLAN_ANY, false, true);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+               }
        } else {
                /* relate the VSI_VMDQ name to the VSI_MAIN name */
                snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
                         pf->vsi[pf->lan_vsi]->netdev->name);
                random_ether_addr(mac_addr);
+
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
        }
+
+       spin_lock_bh(&vsi->mac_filter_list_lock);
        i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        ether_addr_copy(netdev->dev_addr, mac_addr);
 #ifdef ETHTOOL_GPERMADDR
@@ -8819,7 +9391,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
  **/
 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 {
-#ifdef HAVE_BRIDGE_ATTRIBS
        struct i40e_veb *veb;
        struct i40e_pf *pf = vsi->back;
 
@@ -8828,13 +9399,27 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
                return 1;
 
        veb = pf->veb[vsi->veb_idx];
+       if (!veb) {
+               dev_info(&pf->pdev->dev,
+                        "There is no veb associated with the bridge\n");
+               return -ENOENT;
+       }
+
+#ifdef HAVE_BRIDGE_ATTRIBS
        /* Uplink is a bridge in VEPA mode */
-       if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
+       if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
                return 0;
-
+       } else {
+               /* Uplink is a bridge in VEB mode */
+               return 1;
+       }
+#else
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               return 1;
 #endif
-       /* Uplink is a bridge in VEB mode */
-       return 1;
+
+       /* VEPA is now default bridge, so return 0 */
+       return 0;
 }
 
 /**
@@ -8847,10 +9432,13 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 static int i40e_add_vsi(struct i40e_vsi *vsi)
 {
        int ret = -ENODEV;
-       struct i40e_mac_filter *f, *ftmp;
+       u8 laa_macaddr[ETH_ALEN];
+       bool found_laa_mac_filter = false;
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vsi_context ctxt;
+       struct i40e_mac_filter *f, *ftmp;
+
        u8 enabled_tc = 0x1; /* TC0 enabled */
        int f_count = 0;
        u32 val;
@@ -8870,11 +9458,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ctxt.flags = I40E_AQ_VSI_TYPE_PF;
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "couldn't get PF vsi config, err %d, aq_err %d\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                "couldn't get PF vsi config, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
                        return -ENOENT;
                }
-               memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+               vsi->info = ctxt.info;
                vsi->info.valid_sections = 0;
 
                vsi->seid = ctxt.seid;
@@ -8893,8 +9483,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
                        if (ret) {
                                dev_info(&pf->pdev->dev,
-                                        "update vsi failed, aq_err=%d\n",
-                                        pf->hw.aq.asq_last_status);
+                                        "update vsi failed, err %s aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
                                ret = -ENOENT;
                                goto err;
                        }
@@ -8911,9 +9503,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                        ret = i40e_vsi_config_tc(vsi, enabled_tc);
                        if (ret) {
                                dev_info(&pf->pdev->dev,
-                                        "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
-                                        enabled_tc, ret,
-                                        pf->hw.aq.asq_last_status);
+                                        "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+                                        enabled_tc,
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
                                ret = -ENOENT;
                        }
                }
@@ -8928,9 +9522,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
                    (i40e_is_vsi_uplink_mode_veb(vsi))) {
                        ctxt.info.valid_sections |=
-                               cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+                            cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
                        ctxt.info.switch_id =
-                               cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+                          cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
                }
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
@@ -9006,12 +9600,14 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
                if (ret) {
                        dev_info(&vsi->back->pdev->dev,
-                                "add vsi failed, aq_err=%d\n",
-                                vsi->back->hw.aq.asq_last_status);
+                                "add vsi failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
                        ret = -ENOENT;
                        goto err;
                }
-               memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+               vsi->info = ctxt.info;
                vsi->info.valid_sections = 0;
                vsi->seid = ctxt.seid;
                vsi->id = ctxt.vsi_number;
@@ -9021,32 +9617,41 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                                 "Note: VSI source pruning is not being set correctly by FW\n");
        }
 
+       spin_lock_bh(&vsi->mac_filter_list_lock);
        /* If macvlan filters already exist, force them to get loaded */
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                f->changed = true;
                f_count++;
 
+               /* Expected to have only one MAC filter entry for LAA in list */
                if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
-                       struct i40e_aqc_remove_macvlan_element_data element;
+                       ether_addr_copy(laa_macaddr, f->macaddr);
+                       found_laa_mac_filter = true;
+               }
+       }
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
-                       memset(&element, 0, sizeof(element));
-                       ether_addr_copy(element.mac_addr, f->macaddr);
-                       element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
-                       ret = i40e_aq_remove_macvlan(hw, vsi->seid,
-                                                    &element, 1, NULL);
-                       if (ret) {
-                               /* some older FW has a different default */
-                               element.flags |=
-                                              I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
-                               i40e_aq_remove_macvlan(hw, vsi->seid,
-                                                      &element, 1, NULL);
-                       }
+       if (found_laa_mac_filter) {
+               struct i40e_aqc_remove_macvlan_element_data element;
 
-                       i40e_aq_mac_address_write(hw,
-                                                 I40E_AQC_WRITE_TYPE_LAA_WOL,
-                                                 f->macaddr, NULL);
+               memset(&element, 0, sizeof(element));
+               ether_addr_copy(element.mac_addr, laa_macaddr);
+               element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+               ret = i40e_aq_remove_macvlan(hw, vsi->seid,
+                                            &element, 1, NULL);
+               if (ret) {
+                       /* some older FW has a different default */
+                       element.flags |=
+                                      I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+                       i40e_aq_remove_macvlan(hw, vsi->seid,
+                                              &element, 1, NULL);
                }
+
+               i40e_aq_mac_address_write(hw,
+                                         I40E_AQC_WRITE_TYPE_LAA_WOL,
+                                         laa_macaddr, NULL);
        }
+
        if (f_count) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                pf->flags |= I40E_FLAG_FILTER_SYNC;
@@ -9056,8 +9661,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
        ret = i40e_vsi_get_bw_info(vsi);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get vsi bw info, err %d, aq_err %d\n",
-                        ret, pf->hw.aq.asq_last_status);
+                        "couldn't get vsi bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                /* VSI is already added so not tearing that up */
                ret = 0;
        }
@@ -9108,10 +9714,13 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                i40e_vsi_disable_irq(vsi);
        }
 
+       spin_lock_bh(&vsi->mac_filter_list_lock);
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
                i40e_del_filter(vsi, f->macaddr, f->vlan,
                                f->is_vf, f->is_netdev);
-       i40e_sync_vsi_filters(vsi);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       i40e_sync_vsi_filters(vsi, false);
 
        i40e_vsi_delete(vsi);
        i40e_vsi_free_q_vectors(vsi);
@@ -9237,7 +9846,7 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
        ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
        if (ret < 0) {
                dev_info(&pf->pdev->dev,
-                        "failed to get tracking for %d queues for VSI %d err=%d\n",
+                        "failed to get tracking for %d queues for VSI %d err %d\n",
                         vsi->alloc_queue_pairs, vsi->seid, ret);
                goto err_vsi;
        }
@@ -9338,8 +9947,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                if (veb) {
                        if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
                                dev_info(&vsi->back->pdev->dev,
-                                        "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
-                                        __func__);
+                                        "New VSI creation error, uplink seid of LAN VSI expected.\n");
                                return NULL;
                        }
 #ifdef HAVE_BRIDGE_ATTRIBS
@@ -9482,8 +10090,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
                                                  &bw_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "query veb bw config failed, aq_err=%d\n",
-                        hw->aq.asq_last_status);
+                        "query veb bw config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
                goto out;
        }
 
@@ -9491,8 +10100,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
                                                   &ets_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "query veb bw ets config failed, aq_err=%d\n",
-                        hw->aq.asq_last_status);
+                        "query veb bw ets config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
                goto out;
        }
 
@@ -9676,36 +10286,40 @@ void i40e_veb_release(struct i40e_veb *veb)
  **/
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
 {
-       bool is_default = veb->pf->cur_promisc;
+       struct i40e_pf *pf = veb->pf;
+       bool is_default = pf->cur_promisc;
        bool is_cloud = false;
        int ret;
 
        /* get a VEB from the hardware */
-       ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+       ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
                              veb->enabled_tc, is_default,
                              is_cloud, &veb->seid, NULL);
        if (ret) {
-               dev_info(&veb->pf->pdev->dev,
-                        "couldn't add VEB, err %d, aq_err %d\n",
-                        ret, veb->pf->hw.aq.asq_last_status);
+               dev_info(&pf->pdev->dev,
+                        "couldn't add VEB, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EPERM;
        }
 
        /* get statistics counter */
-       ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+       ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
                                         &veb->stats_idx, NULL, NULL, NULL);
        if (ret) {
-               dev_info(&veb->pf->pdev->dev,
-                        "couldn't get VEB statistics idx, err %d, aq_err %d\n",
-                        ret, veb->pf->hw.aq.asq_last_status);
+               dev_info(&pf->pdev->dev,
+                        "couldn't get VEB statistics idx, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EPERM;
        }
        ret = i40e_veb_get_bw_info(veb);
        if (ret) {
-               dev_info(&veb->pf->pdev->dev,
-                        "couldn't get VEB bw info, err %d, aq_err %d\n",
-                        ret, veb->pf->hw.aq.asq_last_status);
-               i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+               dev_info(&pf->pdev->dev,
+                        "couldn't get VEB bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
                return -ENOENT;
        }
 
@@ -9911,8 +10525,10 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
                                                &next_seid, NULL);
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "get switch config failed %d aq_err=%x\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                "get switch config failed err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
                        kfree(aq_buf);
                        return -ENOENT;
                }
@@ -9953,8 +10569,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
        ret = i40e_fetch_switch_configuration(pf, false);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't fetch switch config, err %d, aq_err %d\n",
-                        ret, pf->hw.aq.asq_last_status);
+                        "couldn't fetch switch config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return ret;
        }
        i40e_pf_reset_stats(pf);
@@ -9983,6 +10600,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
        } else {
                /* force a reset of TC and queue layout configurations */
                u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+
                pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
                pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
                i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
@@ -10006,7 +10624,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
                i40e_config_rss(pf);
 
        /* fill in link information and enable LSE reporting */
-       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+       i40e_update_link_info(&pf->hw);
        i40e_link_event(pf);
 
        /* Initialize user-specific link properties */
@@ -10126,8 +10744,14 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
        }
 
        pf->queues_left = queues_left;
+       dev_dbg(&pf->pdev->dev,
+               "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
+               pf->hw.func_caps.num_tx_qp,
+               !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
+               pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
+               pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
 #ifdef I40E_FCOE
-       dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
+       dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
 #endif
 }
 
@@ -10194,6 +10818,7 @@ static void i40e_print_features(struct i40e_pf *pf)
        }
        if (pf->flags & I40E_FLAG_DCB_CAPABLE)
                buf += sprintf(buf, "DCB ");
+       buf += sprintf(buf, "VxLAN ");
 #ifdef HAVE_PTP_1588_CLOCK
        if (pf->flags & I40E_FLAG_PTP)
                buf += sprintf(buf, "PTP ");
@@ -10202,6 +10827,10 @@ static void i40e_print_features(struct i40e_pf *pf)
        if (pf->flags & I40E_FLAG_FCOE_ENABLED)
                buf += sprintf(buf, "FCOE ");
 #endif
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               buf += sprintf(buf, "VEB ");
+       else
+               buf += sprintf(buf, "VEPA ");
 
        BUG_ON(buf > (string + INFO_STRING_LEN));
        dev_info(&pf->pdev->dev, "%s\n", string);
@@ -10231,11 +10860,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct i40e_hw *hw;
        static u16 pfs_found;
        u16 wol_nvm_bits;
-       u32 ioremap_len;
        u16 link_status;
        int err = 0;
        u32 len;
        u32 i;
+       u8 set_fc_aq_fail;
 
        err = pci_enable_device_mem(pdev);
        if (err)
@@ -10281,15 +10910,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw = &pf->hw;
        hw->back = pf;
 
-       ioremap_len = min_t(int, pci_resource_len(pdev, 0),
-                           I40E_MAX_CSR_SPACE);
+       pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
+                               I40E_MAX_CSR_SPACE);
 
-       hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
+       hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
        if (!hw->hw_addr) {
                err = -EIO;
                dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
                         (unsigned int)pci_resource_start(pdev, 0),
-                        (unsigned int)pci_resource_len(pdev, 0), err);
+                        pf->ioremap_len, err);
                goto err_ioremap;
        }
        hw->vendor_id = pdev->vendor;
@@ -10326,7 +10955,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = i40e_init_shared_code(hw);
        if (err) {
-               dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+               dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", err);
                goto err_pf_reset;
        }
 
@@ -10334,7 +10963,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pf->hw.fc.requested_mode = I40E_FC_NONE;
 
        err = i40e_init_adminq(hw);
-       dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+
+       /* provide nvm, fw, api versions */
+       dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
+                hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
+                hw->aq.api_maj_ver, hw->aq.api_min_ver,
+                i40e_nvm_version_str(hw));
+
        if (err) {
                dev_info(&pdev->dev,
                         "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
@@ -10436,11 +11071,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        INIT_WORK(&pf->service_task, i40e_service_task);
        clear_bit(__I40E_SERVICE_SCHED, &pf->state);
        pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
-       pf->link_check_timeout = jiffies;
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
+       if (BIT_ULL(hw->port) & wol_nvm_bits || hw->partition_id != 1)
                pf->wol_en = false;
        else
                pf->wol_en = true;
@@ -10448,7 +11082,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* set up the main switch operations */
        i40e_determine_queue_usage(pf);
-       i40e_init_interrupt_scheme(pf);
+       err = i40e_init_interrupt_scheme(pf);
+       if (err)
+               goto err_switch_setup;
 
        /* The number of VSIs reported by the FW is the minimum guaranteed
         * to us; HW supports far more and we share the remaining pool with
@@ -10486,6 +11122,25 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
                goto err_vsis;
        }
+
+       /* Make sure flow control is set according to current settings */
+       err = i40e_set_fc(hw, &set_fc_aq_fail, true);
+       if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
+               dev_dbg(&pf->pdev->dev,
+                        "Set fc with err %s aq_err %s on get_phy_cap\n",
+                        i40e_stat_str(hw, err),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+       if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
+               dev_dbg(&pf->pdev->dev,
+                        "Set fc with err %s aq_err %s on set_phy_config\n",
+                        i40e_stat_str(hw, err),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+       if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
+               dev_dbg(&pf->pdev->dev,
+                        "Set fc with err %s aq_err %s on get_link_info\n",
+                        i40e_stat_str(hw, err),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+
        /* if FDIR VSI was set up, start it now */
        for (i = 0; i < pf->num_alloc_vsi; i++) {
                if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
@@ -10501,15 +11156,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                                       I40E_AQ_EVENT_LINK_UPDOWN |
                                       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
        if (err)
-               dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+               dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
            (pf->hw.aq.fw_maj_ver < 4)) {
                msleep(75);
                err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (err)
-                       dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-                                pf->hw.aq.asq_last_status);
+                       dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, err),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
        }
        /* The main driver is (mostly) up and happy. We need to set this state
         * before setting up the misc vector or we get a race and the vector
@@ -10566,7 +11225,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif /* CONFIG_PCI_IOV */
 
        pfs_found++;
-
        i40e_dbg_pf_init(pf);
 
        /* tell the firmware that we're starting */
@@ -10581,35 +11239,81 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        i40e_fcoe_vsi_setup(pf);
 
 #endif
-       /* Get the negotiated link width and speed from PCI config space */
-       pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
-
-       i40e_set_pci_config_data(hw, link_status);
+#define PCI_SPEED_SIZE 8
+#define PCI_WIDTH_SIZE 8
+       /* Devices on the IOSF bus do not have this information right
+        * and will report PCI Gen 1 x 1 by default so don't bother
+        * checking them.
+        */
+       if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
+               char speed[PCI_SPEED_SIZE] = "Unknown";
+               char width[PCI_WIDTH_SIZE] = "Unknown";
+
+               /* Get the negotiated link width and speed from PCI config
+                * space */
+               pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
+                                         &link_status);
+
+               i40e_set_pci_config_data(hw, link_status);
+
+               switch (hw->bus.speed) {
+               case i40e_bus_speed_8000:
+                       strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+               case i40e_bus_speed_5000:
+                       strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+               case i40e_bus_speed_2500:
+                       strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+               default:
+                       break;
+               }
+               switch (hw->bus.width) {
+               case i40e_bus_width_pcie_x8:
+                       strncpy(width, "8", PCI_WIDTH_SIZE); break;
+               case i40e_bus_width_pcie_x4:
+                       strncpy(width, "4", PCI_WIDTH_SIZE); break;
+               case i40e_bus_width_pcie_x2:
+                       strncpy(width, "2", PCI_WIDTH_SIZE); break;
+               case i40e_bus_width_pcie_x1:
+                       strncpy(width, "1", PCI_WIDTH_SIZE); break;
+               default:
+                       break;
+               }
 
-       dev_info(&pdev->dev, "PCI-Express: %s %s\n",
-               (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
-                hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
-                hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
-                "Unknown"),
-               (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
-                hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
-                hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
-                hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
-                "Unknown"));
+               dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
+                        speed, width);
 
-       if (hw->bus.width < i40e_bus_width_pcie_x8 ||
-           hw->bus.speed < i40e_bus_speed_8000) {
-               dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
-               dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+               if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+                   hw->bus.speed < i40e_bus_speed_8000) {
+                       dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+                       dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+               }
        }
 
        /* get the requested speeds from the fw */
        err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
        if (err)
-               dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
-                        err);
+               dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
+                       i40e_stat_str(&pf->hw, err),
+                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
 
+       /* get the supported phy types from the fw */
+       err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+       if (err)
+               dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
+                       i40e_stat_str(&pf->hw, err),
+                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+       pf->hw.phy.phy_types = LE32_TO_CPU(abilities.phy_type);
+
+       /* Add a filter to drop all Flow control frames from any VSI from being
+        * transmitted. By doing so we stop a malicious VF from sending out
+        * PAUSE or PFC frames and potentially controlling traffic for other
+        * PF/VF VSIs.
+        * The FW can still send Flow control frames if enabled.
+        */
+       i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+                                                       pf->main_vsi_seid);
+
        /* print a string summarizing features */
        i40e_print_features(pf);
 
@@ -10632,7 +11336,7 @@ err_sw_init:
 err_adminq_setup:
        (void)i40e_shutdown_adminq(hw);
 err_pf_reset:
-       dev_warn(&pdev->dev, "HW reset failed or FW unresponsive, module loaded in debug mode\n");
+       dev_warn(&pdev->dev, "previous errors forcing module to load in debug mode\n");
        i40e_dbg_pf_init(pf);
        set_bit(__I40E_DEBUG_MODE, &pf->state);
        return 0;
@@ -10664,6 +11368,9 @@ static void i40e_remove(struct pci_dev *pdev)
 #endif
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
+#ifdef HAVE_PTP_1588_CLOCK
+       struct i40e_hw *hw = &pf->hw;
+#endif /* HAVE_PTP_1588_CLOCK */
        i40e_status ret_code;
        int i;
 
@@ -10674,6 +11381,10 @@ static void i40e_remove(struct pci_dev *pdev)
 #ifdef HAVE_PTP_1588_CLOCK
        i40e_ptp_stop(pf);
 
+       /* Disable RSS in hw */
+       wr32(hw, I40E_PFQF_HENA(0), 0);
+       wr32(hw, I40E_PFQF_HENA(1), 0);
+
 #endif /* HAVE_PTP_1588_CLOCK */
        /* no more scheduling of any task */
        set_bit(__I40E_DOWN, &pf->state);
@@ -10792,7 +11503,7 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
        int err;
        u32 reg;
 
-       dev_info(&pdev->dev, "%s\n", __func__);
+       dev_dbg(&pdev->dev, "%s\n", __func__);
        if (pci_enable_device_mem(pdev)) {
                dev_info(&pdev->dev,
                         "Cannot re-enable PCI device after reset.\n");
@@ -10832,7 +11543,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
 
-       dev_info(&pdev->dev, "%s\n", __func__);
+       dev_dbg(&pdev->dev, "%s\n", __func__);
        if (test_bit(__I40E_SUSPENDED, &pf->state))
                return;
 
@@ -10852,18 +11563,23 @@ static void i40e_shutdown(struct pci_dev *pdev)
 
        set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
-       del_timer_sync(&pf->service_timer);
-       cancel_work_sync(&pf->service_task);
-       i40e_fdir_teardown(pf);
 
-       rtnl_lock();
-       i40e_prep_for_reset(pf);
-       rtnl_unlock();
+       if (!test_bit(__I40E_DEBUG_MODE, &pf->state)) {
+               del_timer_sync(&pf->service_timer);
+               cancel_work_sync(&pf->service_task);
+               i40e_fdir_teardown(pf);
+
+               rtnl_lock();
+               i40e_prep_for_reset(pf);
+               rtnl_unlock();
 
-       wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
-       wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+               wr32(hw, I40E_PFPM_APM,
+                    (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+               wr32(hw, I40E_PFPM_WUFC,
+                    (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
 
-       i40e_clear_interrupt_scheme(pf);
+               i40e_clear_interrupt_scheme(pf);
+       }
 
        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, pf->wol_en);
@@ -10883,12 +11599,17 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
 
        set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
-       rtnl_lock();
-       i40e_prep_for_reset(pf);
-       rtnl_unlock();
 
-       wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
-       wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+       if (!test_bit(__I40E_DEBUG_MODE, &pf->state)) {
+               rtnl_lock();
+               i40e_prep_for_reset(pf);
+               rtnl_unlock();
+
+               wr32(hw, I40E_PFPM_APM,
+                    (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+               wr32(hw, I40E_PFPM_WUFC,
+                    (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+       }
 
        pci_wake_from_d3(pdev, pf->wol_en);
        pci_set_power_state(pdev, PCI_D3hot);
@@ -10914,9 +11635,7 @@ static int i40e_resume(struct pci_dev *pdev)
 
        err = pci_enable_device_mem(pdev);
        if (err) {
-               dev_err(pci_dev_to_dev(pdev),
-                       "%s: Cannot enable PCI device from suspend\n",
-                       __func__);
+               dev_err(pci_dev_to_dev(pdev), "Cannot enable PCI device from suspend\n");
                return err;
        }
        pci_set_master(pdev);
@@ -10990,6 +11709,16 @@ static int __init i40e_init_module(void)
                i40e_driver_string, i40e_driver_version_str);
        pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
 
+       /* we will see if single thread per module is enough for now,
+        * it can't be any worse than using the system workqueue which
+        * was already single threaded
+        */
+       i40e_wq = create_singlethread_workqueue(i40e_driver_name);
+       if (!i40e_wq) {
+               pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
+               return -ENOMEM;
+       }
+
 #if IS_ENABLED(CONFIG_CONFIGFS_FS)
        i40e_configfs_init();
 #endif /* CONFIG_CONFIGFS_FS */
@@ -11007,6 +11736,7 @@ module_init(i40e_init_module);
 static void __exit i40e_exit_module(void)
 {
        pci_unregister_driver(&i40e_driver);
+       destroy_workqueue(i40e_wq);
        i40e_dbg_exit();
 #if IS_ENABLED(CONFIG_CONFIGFS_FS)
        i40e_configfs_exit();
similarity index 76%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_nvm.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_nvm.c
index 1ee071ca1381897abcc542658bc7bf725128eae2..30ceba0ad0f4001737f39ae1a2f3837e0a8d8131 100644 (file)
@@ -62,7 +62,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
        sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
                           I40E_GLNVM_GENS_SR_SIZE_SHIFT);
        /* Switching to words (sr_size contains power of 2KB) */
-       nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+       nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
 
        /* Check if we are in the normal or blank NVM programming mode */
        fla = rd32(hw, I40E_GLNVM_FLA);
@@ -146,8 +146,24 @@ i40e_i40e_acquire_nvm_exit:
  **/
 void i40e_release_nvm(struct i40e_hw *hw)
 {
-       if (!hw->nvm.blank_nvm_mode)
-               i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+       i40e_status ret_code = I40E_SUCCESS;
+       u32 total_delay = 0;
+
+       if (hw->nvm.blank_nvm_mode)
+               return;
+
+       ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
+       /* there are some rare cases when trying to release the resource
+        * results in an admin Q timeout, so handle them correctly
+        */
+       while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
+              (total_delay < hw->aq.asq_cmd_timeout)) {
+                       usleep_range(1000, 2000);
+                       ret_code = i40e_aq_release_resource(hw,
+                                               I40E_NVM_RESOURCE_ID, 0, NULL);
+                       total_delay++;
+       }
 }
 
 /**
@@ -215,8 +231,8 @@ i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
        ret_code = i40e_poll_sr_srctl_done_bit(hw);
        if (ret_code == I40E_SUCCESS) {
                /* Write the address and start reading */
-               sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
-                        (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+               sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+                        BIT(I40E_GLNVM_SRCTL_START_SHIFT);
                wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
 
                /* Poll I40E_GLNVM_SRCTL until the done bit is set */
@@ -377,6 +393,10 @@ i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                                       bool last_command)
 {
        i40e_status ret_code = I40E_ERR_NVM;
+       struct i40e_asq_cmd_details cmd_details;
+
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
 
        /* Here we are checking the SR limit only for the flat memory model.
         * We cannot do it for the module-based model, as we did not acquire
@@ -402,7 +422,7 @@ i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                ret_code = i40e_aq_read_nvm(hw, module_pointer,
                                            2 * offset,  /*bytes*/
                                            2 * words,   /*bytes*/
-                                           data, last_command, NULL);
+                                           data, last_command, &cmd_details);
 
        return ret_code;
 }
@@ -423,6 +443,10 @@ i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                                        bool last_command)
 {
        i40e_status ret_code = I40E_ERR_NVM;
+       struct i40e_asq_cmd_details cmd_details;
+
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
 
        /* Here we are checking the SR limit only for the flat memory model.
         * We cannot do it for the module-based model, as we did not acquire
@@ -442,7 +466,7 @@ i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                ret_code = i40e_aq_update_nvm(hw, module_pointer,
                                              2 * offset,  /*bytes*/
                                              2 * words,   /*bytes*/
-                                             data, last_command, NULL);
+                                             data, last_command, &cmd_details);
 
        return ret_code;
 }
@@ -546,6 +570,7 @@ i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
                /* Read SR page */
                if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
                        u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
                        ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
                        if (ret_code != I40E_SUCCESS) {
                                ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -591,11 +616,13 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
 {
        i40e_status ret_code = I40E_SUCCESS;
        u16 checksum;
+       __le16 le_sum;
 
        ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+       le_sum = CPU_TO_LE16(checksum);
        if (ret_code == I40E_SUCCESS)
                ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
-                                            1, &checksum, true);
+                                            1, &le_sum, true);
 
        return ret_code;
 }
@@ -640,35 +667,41 @@ i40e_validate_nvm_checksum_exit:
 
 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                                    struct i40e_nvm_access *cmd,
-                                                   u8 *bytes, int *errno);
+                                                   u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
                                                    struct i40e_nvm_access *cmd,
-                                                   u8 *bytes, int *errno);
+                                                   u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
                                                    struct i40e_nvm_access *cmd,
-                                                   u8 *bytes, int *errno);
+                                                   u8 *bytes, int *perrno);
 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
                                                    struct i40e_nvm_access *cmd,
-                                                   int *errno);
+                                                   int *perrno);
 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
                                                   struct i40e_nvm_access *cmd,
-                                                  int *errno);
+                                                  int *perrno);
 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
                                                   struct i40e_nvm_access *cmd,
-                                                  u8 *bytes, int *errno);
+                                                  u8 *bytes, int *perrno);
 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
                                                  struct i40e_nvm_access *cmd,
-                                                 u8 *bytes, int *errno);
-static inline u8 i40e_nvmupd_get_module(u32 val)
+                                                 u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+                                                struct i40e_nvm_access *cmd,
+                                                u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   u8 *bytes, int *perrno);
+static INLINE u8 i40e_nvmupd_get_module(u32 val)
 {
        return (u8)(val & I40E_NVM_MOD_PNT_MASK);
 }
-static inline u8 i40e_nvmupd_get_transaction(u32 val)
+static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
 {
        return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
 }
 
-static char *i40e_nvm_update_state_str[] = {
+static const char *i40e_nvm_update_state_str[] = {
        "I40E_NVMUPD_INVALID",
        "I40E_NVMUPD_READ_CON",
        "I40E_NVMUPD_READ_SNT",
@@ -682,6 +715,9 @@ static char *i40e_nvm_update_state_str[] = {
        "I40E_NVMUPD_CSUM_CON",
        "I40E_NVMUPD_CSUM_SA",
        "I40E_NVMUPD_CSUM_LCB",
+       "I40E_NVMUPD_STATUS",
+       "I40E_NVMUPD_EXEC_AQ",
+       "I40E_NVMUPD_GET_AQ_RESULT",
 };
 
 /**
@@ -689,30 +725,60 @@ static char *i40e_nvm_update_state_str[] = {
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * Dispatches command depending on what update state is current
  **/
 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
                                          struct i40e_nvm_access *cmd,
-                                         u8 *bytes, int *errno)
+                                         u8 *bytes, int *perrno)
 {
        i40e_status status;
+       enum i40e_nvmupd_cmd upd_cmd;
 
        /* assume success */
-       *errno = 0;
+       *perrno = 0;
+
+       /* early check for status command and debug msgs */
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+       i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
+                  i40e_nvm_update_state_str[upd_cmd],
+                  hw->nvmupd_state,
+                  hw->aq.nvm_release_on_done);
+
+       if (upd_cmd == I40E_NVMUPD_INVALID) {
+               *perrno = -EFAULT;
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_validate_command returns %d errno %d\n",
+                          upd_cmd, *perrno);
+       }
+
+       /* a status request returns immediately rather than
+        * going into the state machine
+        */
+       if (upd_cmd == I40E_NVMUPD_STATUS) {
+               bytes[0] = hw->nvmupd_state;
+               return I40E_SUCCESS;
+       }
 
        switch (hw->nvmupd_state) {
        case I40E_NVMUPD_STATE_INIT:
-               status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
+               status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
                break;
 
        case I40E_NVMUPD_STATE_READING:
-               status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
+               status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
                break;
 
        case I40E_NVMUPD_STATE_WRITING:
-               status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
+               status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+               break;
+
+       case I40E_NVMUPD_STATE_INIT_WAIT:
+       case I40E_NVMUPD_STATE_WRITE_WAIT:
+               status = I40E_ERR_NOT_READY;
+               *perrno = -EBUSY;
                break;
 
        default:
@@ -720,7 +786,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "NVMUPD: no such state %d\n", hw->nvmupd_state);
                status = I40E_NOT_SUPPORTED;
-               *errno = -ESRCH;
+               *perrno = -ESRCH;
                break;
        }
        return status;
@@ -731,28 +797,28 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * Process legitimate commands of the Init state and conditionally set next
  * state. Reject all other commands.
  **/
 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
                                                    struct i40e_nvm_access *cmd,
-                                                   u8 *bytes, int *errno)
+                                                   u8 *bytes, int *perrno)
 {
        i40e_status status = I40E_SUCCESS;
        enum i40e_nvmupd_cmd upd_cmd;
 
-       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
        switch (upd_cmd) {
        case I40E_NVMUPD_READ_SA:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
                if (status) {
-                       *errno = i40e_aq_rc_to_posix(status,
+                       *perrno = i40e_aq_rc_to_posix(status,
                                                     hw->aq.asq_last_status);
                } else {
-                       status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+                       status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
                        i40e_release_nvm(hw);
                }
                break;
@@ -760,10 +826,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
        case I40E_NVMUPD_READ_SNT:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
                if (status) {
-                       *errno = i40e_aq_rc_to_posix(status,
+                       *perrno = i40e_aq_rc_to_posix(status,
                                                     hw->aq.asq_last_status);
                } else {
-                       status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+                       status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
                        if (status)
                                i40e_release_nvm(hw);
                        else
@@ -774,70 +840,83 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
        case I40E_NVMUPD_WRITE_ERA:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
                if (status) {
-                       *errno = i40e_aq_rc_to_posix(status,
+                       *perrno = i40e_aq_rc_to_posix(status,
                                                     hw->aq.asq_last_status);
                } else {
-                       status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
-                       if (status)
+                       status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
+                       if (status) {
                                i40e_release_nvm(hw);
-                       else
+                       } else {
                                hw->aq.nvm_release_on_done = true;
+                               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+                       }
                }
                break;
 
        case I40E_NVMUPD_WRITE_SA:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
                if (status) {
-                       *errno = i40e_aq_rc_to_posix(status,
+                       *perrno = i40e_aq_rc_to_posix(status,
                                                     hw->aq.asq_last_status);
                } else {
-                       status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
-                       if (status)
+                       status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+                       if (status) {
                                i40e_release_nvm(hw);
-                       else
+                       } else {
                                hw->aq.nvm_release_on_done = true;
+                               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+                       }
                }
                break;
 
        case I40E_NVMUPD_WRITE_SNT:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
                if (status) {
-                       *errno = i40e_aq_rc_to_posix(status,
+                       *perrno = i40e_aq_rc_to_posix(status,
                                                     hw->aq.asq_last_status);
                } else {
-                       status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+                       status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
                        if (status)
                                i40e_release_nvm(hw);
                        else
-                               hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+                               hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
                }
                break;
 
        case I40E_NVMUPD_CSUM_SA:
                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
                if (status) {
-                       *errno = i40e_aq_rc_to_posix(status,
+                       *perrno = i40e_aq_rc_to_posix(status,
                                                     hw->aq.asq_last_status);
                } else {
                        status = i40e_update_nvm_checksum(hw);
                        if (status) {
-                               *errno = hw->aq.asq_last_status ?
+                               *perrno = hw->aq.asq_last_status ?
                                   i40e_aq_rc_to_posix(status,
                                                       hw->aq.asq_last_status) :
                                   -EIO;
                                i40e_release_nvm(hw);
                        } else {
                                hw->aq.nvm_release_on_done = true;
+                               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
                        }
                }
                break;
 
+       case I40E_NVMUPD_EXEC_AQ:
+               status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
+               break;
+
+       case I40E_NVMUPD_GET_AQ_RESULT:
+               status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
+               break;
+
        default:
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "NVMUPD: bad cmd %s in init state\n",
                           i40e_nvm_update_state_str[upd_cmd]);
                status = I40E_ERR_NVM;
-               *errno = -ESRCH;
+               *perrno = -ESRCH;
                break;
        }
        return status;
@@ -848,28 +927,28 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * NVM ownership is already held.  Process legitimate commands and set any
  * change in state; reject all other commands.
  **/
 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
                                                    struct i40e_nvm_access *cmd,
-                                                   u8 *bytes, int *errno)
+                                                   u8 *bytes, int *perrno)
 {
-       i40e_status status;
+       i40e_status status = I40E_SUCCESS;
        enum i40e_nvmupd_cmd upd_cmd;
 
-       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
        switch (upd_cmd) {
        case I40E_NVMUPD_READ_SA:
        case I40E_NVMUPD_READ_CON:
-               status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+               status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
                break;
 
        case I40E_NVMUPD_READ_LCB:
-               status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+               status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
                i40e_release_nvm(hw);
                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
                break;
@@ -879,7 +958,7 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
                           "NVMUPD: bad cmd %s in reading state.\n",
                           i40e_nvm_update_state_str[upd_cmd]);
                status = I40E_NOT_SUPPORTED;
-               *errno = -ESRCH;
+               *perrno = -ESRCH;
                break;
        }
        return status;
@@ -890,55 +969,68 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * NVM ownership is already held.  Process legitimate commands and set any
  * change in state; reject all other commands
  **/
 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
                                                    struct i40e_nvm_access *cmd,
-                                                   u8 *bytes, int *errno)
+                                                   u8 *bytes, int *perrno)
 {
-       i40e_status status;
+       i40e_status status = I40E_SUCCESS;
        enum i40e_nvmupd_cmd upd_cmd;
        bool retry_attempt = false;
 
-       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
 retry:
        switch (upd_cmd) {
        case I40E_NVMUPD_WRITE_CON:
-               status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+               status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+               if (!status)
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
                break;
 
        case I40E_NVMUPD_WRITE_LCB:
-               status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
-               if (!status)
+               status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+               if (status) {
+                       *perrno = hw->aq.asq_last_status ?
+                                  i40e_aq_rc_to_posix(status,
+                                                      hw->aq.asq_last_status) :
+                                  -EIO;
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+               } else {
                        hw->aq.nvm_release_on_done = true;
-               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+               }
                break;
 
        case I40E_NVMUPD_CSUM_CON:
                status = i40e_update_nvm_checksum(hw);
                if (status) {
-                       *errno = hw->aq.asq_last_status ?
+                       *perrno = hw->aq.asq_last_status ?
                                   i40e_aq_rc_to_posix(status,
                                                       hw->aq.asq_last_status) :
                                   -EIO;
                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+               } else {
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
                }
                break;
 
        case I40E_NVMUPD_CSUM_LCB:
                status = i40e_update_nvm_checksum(hw);
-               if (status)
-                       *errno = hw->aq.asq_last_status ?
+               if (status) {
+                       *perrno = hw->aq.asq_last_status ?
                                   i40e_aq_rc_to_posix(status,
                                                       hw->aq.asq_last_status) :
                                   -EIO;
-               else
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+               } else {
                        hw->aq.nvm_release_on_done = true;
-               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+               }
                break;
 
        default:
@@ -946,7 +1038,7 @@ retry:
                           "NVMUPD: bad cmd %s in writing state.\n",
                           i40e_nvm_update_state_str[upd_cmd]);
                status = I40E_NOT_SUPPORTED;
-               *errno = -ESRCH;
+               *perrno = -ESRCH;
                break;
        }
 
@@ -989,16 +1081,16 @@ retry:
  * i40e_nvmupd_validate_command - Validate given command
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * Return one of the valid command types or I40E_NVMUPD_INVALID
  **/
 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
                                                    struct i40e_nvm_access *cmd,
-                                                   int *errno)
+                                                   int *perrno)
 {
        enum i40e_nvmupd_cmd upd_cmd;
-       u8 transaction, module;
+       u8 module, transaction;
 
        /* anything that doesn't match a recognized case is an error */
        upd_cmd = I40E_NVMUPD_INVALID;
@@ -1012,7 +1104,7 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "i40e_nvmupd_validate_command data_size %d\n",
                           cmd->data_size);
-               *errno = -EFAULT;
+               *perrno = -EFAULT;
                return I40E_NVMUPD_INVALID;
        }
 
@@ -1031,6 +1123,12 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
                case I40E_NVM_SA:
                        upd_cmd = I40E_NVMUPD_READ_SA;
                        break;
+               case I40E_NVM_EXEC:
+                       if (module == 0xf)
+                               upd_cmd = I40E_NVMUPD_STATUS;
+                       else if (module == 0)
+                               upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+                       break;
                }
                break;
 
@@ -1060,21 +1158,155 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
                case (I40E_NVM_CSUM|I40E_NVM_LCB):
                        upd_cmd = I40E_NVMUPD_CSUM_LCB;
                        break;
+               case I40E_NVM_EXEC:
+                       if (module == 0)
+                               upd_cmd = I40E_NVMUPD_EXEC_AQ;
+                       break;
                }
                break;
        }
-       i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
-                  i40e_nvm_update_state_str[upd_cmd],
-                  hw->nvmupd_state,
-                  hw->aq.nvm_release_on_done);
 
-       if (upd_cmd == I40E_NVMUPD_INVALID) {
-               *errno = -EFAULT;
+       return upd_cmd;
+}
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+                                                struct i40e_nvm_access *cmd,
+                                                u8 *bytes, int *perrno)
+{
+       struct i40e_asq_cmd_details cmd_details;
+       i40e_status status;
+       struct i40e_aq_desc *aq_desc;
+       u32 buff_size = 0;
+       u8 *buff = NULL;
+       u32 aq_desc_len;
+       u32 aq_data_len;
+
+       i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+       aq_desc_len = sizeof(struct i40e_aq_desc);
+       memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+       /* get the aq descriptor */
+       if (cmd->data_size < aq_desc_len) {
                i40e_debug(hw, I40E_DEBUG_NVM,
-                          "i40e_nvmupd_validate_command returns %d errno %d\n",
-                          upd_cmd, *errno);
+                          "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+                          cmd->data_size, aq_desc_len);
+               *perrno = -EINVAL;
+               return I40E_ERR_PARAM;
        }
-       return upd_cmd;
+       aq_desc = (struct i40e_aq_desc *)bytes;
+
+       /* if data buffer needed, make sure it's ready */
+       aq_data_len = cmd->data_size - aq_desc_len;
+       buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
+       if (buff_size) {
+               if (!hw->nvm_buff.va) {
+                       status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+                                                       hw->aq.asq_buf_size);
+                       if (status)
+                               i40e_debug(hw, I40E_DEBUG_NVM,
+                                          "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+                                          status);
+               }
+
+               if (hw->nvm_buff.va) {
+                       buff = hw->nvm_buff.va;
+                       memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+               }
+       }
+
+       /* and away we go! */
+       status = i40e_asq_send_command(hw, aq_desc, buff,
+                                      buff_size, &cmd_details);
+       if (status) {
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_exec_aq err %s aq_err %s\n",
+                          i40e_stat_str(hw, status),
+                          i40e_aq_str(hw, hw->aq.asq_last_status));
+               *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   u8 *bytes, int *perrno)
+{
+       u32 aq_total_len;
+       u32 aq_desc_len;
+       int remainder;
+       u8 *buff;
+
+       i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+       aq_desc_len = sizeof(struct i40e_aq_desc);
+       aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
+
+       /* check offset range */
+       if (cmd->offset > aq_total_len) {
+               i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+                          __func__, cmd->offset, aq_total_len);
+               *perrno = -EINVAL;
+               return I40E_ERR_PARAM;
+       }
+
+       /* check copylength range */
+       if (cmd->data_size > (aq_total_len - cmd->offset)) {
+               int new_len = aq_total_len - cmd->offset;
+
+               i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+                          __func__, cmd->data_size, new_len);
+               cmd->data_size = new_len;
+       }
+
+       remainder = cmd->data_size;
+       if (cmd->offset < aq_desc_len) {
+               u32 len = aq_desc_len - cmd->offset;
+
+               len = min(len, cmd->data_size);
+               i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+                          __func__, cmd->offset, cmd->offset + len);
+
+               buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+               memcpy(bytes, buff, len);
+
+               bytes += len;
+               remainder -= len;
+               buff = hw->nvm_buff.va;
+       } else {
+               buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+       }
+
+       if (remainder > 0) {
+               int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+               i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+                          __func__, start_byte, start_byte + remainder);
+               memcpy(bytes, buff, remainder);
+       }
+
+       return I40E_SUCCESS;
 }
 
 /**
@@ -1082,14 +1314,15 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * cmd structure contains identifiers and data buffer
  **/
 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
                                                  struct i40e_nvm_access *cmd,
-                                                 u8 *bytes, int *errno)
+                                                 u8 *bytes, int *perrno)
 {
+       struct i40e_asq_cmd_details cmd_details;
        i40e_status status;
        u8 module, transaction;
        bool last;
@@ -1098,8 +1331,11 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
        module = i40e_nvmupd_get_module(cmd->config);
        last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
 
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
+
        status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
-                                 bytes, last, NULL);
+                                 bytes, last, &cmd_details);
        if (status) {
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
@@ -1107,7 +1343,7 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "i40e_nvmupd_nvm_read status %d aq %d\n",
                           status, hw->aq.asq_last_status);
-               *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+               *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
        }
 
        return status;
@@ -1117,23 +1353,28 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
  * i40e_nvmupd_nvm_erase - Erase an NVM module
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * module, offset, data_size and data are in cmd structure
  **/
 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
                                                   struct i40e_nvm_access *cmd,
-                                                  int *errno)
+                                                  int *perrno)
 {
        i40e_status status = I40E_SUCCESS;
+       struct i40e_asq_cmd_details cmd_details;
        u8 module, transaction;
        bool last;
 
        transaction = i40e_nvmupd_get_transaction(cmd->config);
        module = i40e_nvmupd_get_module(cmd->config);
        last = (transaction & I40E_NVM_LCB);
+
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
+
        status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
-                                  last, NULL);
+                                  last, &cmd_details);
        if (status) {
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
@@ -1141,7 +1382,7 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "i40e_nvmupd_nvm_erase status %d aq %d\n",
                           status, hw->aq.asq_last_status);
-               *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+               *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
        }
 
        return status;
@@ -1152,15 +1393,16 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
  * @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
  *
  * module, offset, data_size and data are in cmd structure
  **/
 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
                                                   struct i40e_nvm_access *cmd,
-                                                  u8 *bytes, int *errno)
+                                                  u8 *bytes, int *perrno)
 {
        i40e_status status = I40E_SUCCESS;
+       struct i40e_asq_cmd_details cmd_details;
        u8 module, transaction;
        bool last;
 
@@ -1168,8 +1410,12 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
        module = i40e_nvmupd_get_module(cmd->config);
        last = (transaction & I40E_NVM_LCB);
 
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
+
        status = i40e_aq_update_nvm(hw, module, cmd->offset,
-                                   (u16)cmd->data_size, bytes, last, NULL);
+                                   (u16)cmd->data_size, bytes, last,
+                                   &cmd_details);
        if (status) {
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
@@ -1177,7 +1423,7 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
                i40e_debug(hw, I40E_DEBUG_NVM,
                           "i40e_nvmupd_nvm_write status %d aq %d\n",
                           status, hw->aq.asq_last_status);
-               *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+               *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
        }
 
        return status;
similarity index 99%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_osdep.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_osdep.h
index 515cb2fa7af95922f025fbae8f94242fbabf014b..0a515d2cb2fbc525a0aff33f78fac5f0167b8f59 100644 (file)
@@ -67,14 +67,12 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
 
 #define hw_dbg(hw, S, A...)    do {} while (0)
 
-#define I40E_TRACE_REG 0
 #define wr32(a, reg, value)    writel((value), ((a)->hw_addr + (reg)))
 #define rd32(a, reg)           readl((a)->hw_addr + (reg))
 
 #define wr64(a, reg, value)    writeq((value), ((a)->hw_addr + (reg)))
 #define rd64(a, reg)           readq((a)->hw_addr + (reg))
 #define i40e_flush(a)          readl((a)->hw_addr + I40E_GLGEN_STAT)
-
 /* memory allocation tracking */
 struct i40e_dma_mem {
        void *va;
similarity index 95%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_prototype.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_prototype.h
index f64ae991a9253531fa645a82026cf359f530edf2..d1fff9bdc0ed260d4e11e570447f0faecb3c6a78 100644 (file)
@@ -70,6 +70,8 @@ void i40e_idle_aq(struct i40e_hw *hw);
 void i40e_resume_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
@@ -136,6 +138,12 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
                u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
                u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+                               u16 seid, bool enable, u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+                               u16 seid, bool enable, u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
                                struct i40e_vsi_context *vsi_ctx,
                                struct i40e_asq_cmd_details *cmd_details);
@@ -195,6 +203,9 @@ i40e_status i40e_aq_write_nvm_config(struct i40e_hw *hw,
                                u8 cmd_flags, void *data, u16 buf_size,
                                u16 element_count,
                                struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_oem_post_update(struct i40e_hw *hw,
+                               void *buff, u16 buff_size,
+                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
                                void *buff, u16 buff_size, u16 *data_size,
                                enum i40e_admin_queue_opc list_type_opc,
@@ -377,7 +388,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw);
 i40e_status i40e_pf_reset(struct i40e_hw *hw);
 void i40e_clear_hw(struct i40e_hw *hw);
 void i40e_clear_pxe_mode(struct i40e_hw *hw);
-bool i40e_get_link_status(struct i40e_hw *hw);
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+i40e_status i40e_update_link_info(struct i40e_hw *hw);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
                u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
@@ -446,4 +458,11 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
                                u16 vsi_seid, u16 queue, bool is_add,
                                struct i40e_control_filter_stats *stats,
                                struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+                               u8 table_id, u32 start_index, u16 buff_size,
+                               void *buff, u16 *ret_buff_size,
+                               u8 *ret_next_table, u32 *ret_next_index,
+                               struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+                                                   u16 vsi_seid);
 #endif /* _I40E_PROTOTYPE_H_ */
similarity index 90%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_ptp.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_ptp.c
index ac1d8273296717600410d48a0c201a8531086e77..a6dce3ee880d82e721e21715e047262b7e687d50 100644 (file)
@@ -44,9 +44,8 @@
 #define I40E_PTP_10GB_INCVAL 0x0333333333ULL
 #define I40E_PTP_1GB_INCVAL  0x2000000000ULL
 
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  (0x1 << \
-                                       I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (0x2 << \
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (2 << \
                                        I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 
 /**
@@ -58,7 +57,7 @@
  * timespec. However, since the registers are 64 bits of nanoseconds, we must
  * convert the result to a timespec before we can return.
  **/
-static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts)
 {
        struct i40e_hw *hw = &pf->hw;
        u32 hi, lo;
@@ -70,7 +69,7 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
 
        ns = (((u64)hi) << 32) | lo;
 
-       *ts = ns_to_timespec(ns);
+       *ts = ns_to_timespec64(ns);
 }
 
 /**
@@ -82,16 +81,16 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
  * we receive a timespec from the stack, we must convert that timespec into
  * nanoseconds before programming the registers.
  **/
-static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts)
 {
        struct i40e_hw *hw = &pf->hw;
-       u64 ns = timespec_to_ns(ts);
+       u64 ns = timespec64_to_ns(ts);
 
        /* The timer will not update until the high register is written, so
         * write the low register first.
         */
-       wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
-       wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
+       wr32(hw, I40E_PRTTSYN_TIME_L, (u32)ns);
+       wr32(hw, I40E_PRTTSYN_TIME_H, (u32)(ns >> 32));
 }
 
 /**
@@ -143,8 +142,8 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
        else
                adj += diff;
 
-       wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
-       wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
+       wr32(hw, I40E_PRTTSYN_INC_L, (u32)adj);
+       wr32(hw, I40E_PRTTSYN_INC_H, (u32)(adj >> 32));
 
        return 0;
 }
@@ -160,29 +159,28 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-       struct timespec now, then = ns_to_timespec(delta);
+       struct timespec64 now, then = ns_to_timespec64(delta);
        unsigned long flags;
 
        spin_lock_irqsave(&pf->tmreg_lock, flags);
 
        i40e_ptp_read(pf, &now);
        now = timespec_add(now, then);
-       i40e_ptp_write(pf, (const struct timespec *)&now);
+       i40e_ptp_write(pf, (const struct timespec64 *)&now);
 
        spin_unlock_irqrestore(&pf->tmreg_lock, flags);
-
        return 0;
 }
 
 /**
- * i40e_ptp_gettime - Get the time of the PHC
+ * i40e_ptp_gettime64 - Get the time of the PHC
  * @ptp: The PTP clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec64 structure to hold the current time value
  *
  * Read the device clock and return the correct value on ns, after converting it
  * into a timespec struct.
  **/
-static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int i40e_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
        unsigned long flags;
@@ -190,20 +188,19 @@ static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
        spin_lock_irqsave(&pf->tmreg_lock, flags);
        i40e_ptp_read(pf, ts);
        spin_unlock_irqrestore(&pf->tmreg_lock, flags);
-
        return 0;
 }
 
 /**
- * i40e_ptp_settime - Set the time of the PHC
+ * i40e_ptp_settime64 - Set the time of the PHC
  * @ptp: The PTP clock structure
- * @ts: timespec structure that holds the new time value
+ * @ts: timespec64 structure that holds the new time value
  *
  * Set the device clock to the user input value. The conversion from timespec
  * to ns happens in the write function.
  **/
-static int i40e_ptp_settime(struct ptp_clock_info *ptp,
-                           const struct timespec *ts)
+static int i40e_ptp_settime64(struct ptp_clock_info *ptp,
+                           const struct timespec64 *ts)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
        unsigned long flags;
@@ -211,10 +208,48 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
        spin_lock_irqsave(&pf->tmreg_lock, flags);
        i40e_ptp_write(pf, ts);
        spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+       return 0;
+}
+
+#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64
+/**
+ * i40e_ptp_gettime - Get the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the device clock and return the correct value on ns, after converting it
+ * into a timespec struct.
+ **/
+static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+       struct timespec64 ts64;
+       int err;
+
+       err = i40e_ptp_gettime64(ptp, &ts64);
+       if (err)
+               return err;
 
+       *ts = timespec64_to_timespec(ts64);
        return 0;
 }
 
+/**
+ * i40e_ptp_settime - Set the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ **/
+static int i40e_ptp_settime(struct ptp_clock_info *ptp,
+                           const struct timespec *ts)
+{
+       struct timespec64 ts64 = timespec_to_timespec64(*ts);
+
+       return i40e_ptp_settime64(ptp, &ts64);
+}
+#endif
+
 /**
  * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem
  * @ptp: The PTP clock structure
@@ -356,7 +391,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
 
        prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
 
-       if (!(prttsyn_stat & (1 << index)))
+       if (!(prttsyn_stat & BIT(index)))
                return;
 
        lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
@@ -407,8 +442,8 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
         * hardware will not update the clock until both registers have been
         * written.
         */
-       wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
-       wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
+       wr32(hw, I40E_PRTTSYN_INC_L, (u32)incval);
+       wr32(hw, I40E_PRTTSYN_INC_H, (u32)(incval >> 32));
 
        /* Update the base adjustement value. */
        ACCESS_ONCE(pf->ptp_base_adj) = incval;
@@ -576,8 +611,7 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
                return err;
 
        /* save these settings for future reference */
-       memcpy(&pf->tstamp_config, &config,
-              sizeof(pf->tstamp_config));
+       pf->tstamp_config = config;
 
        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
@@ -606,15 +640,19 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
        pf->ptp_caps.pps = 0;
        pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
        pf->ptp_caps.adjtime = i40e_ptp_adjtime;
+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
+       pf->ptp_caps.gettime64 = i40e_ptp_gettime64;
+       pf->ptp_caps.settime64 = i40e_ptp_settime64;
+#else
        pf->ptp_caps.gettime = i40e_ptp_gettime;
        pf->ptp_caps.settime = i40e_ptp_settime;
+#endif
        pf->ptp_caps.enable = i40e_ptp_feature_enable;
 
        /* Attempt to register the clock before enabling the hardware. */
        pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
-       if (IS_ERR(pf->ptp_clock)) {
+       if (IS_ERR(pf->ptp_clock))
                return PTR_ERR(pf->ptp_clock);
-       }
 
        /* clear the hwtstamp settings here during clock create, instead of
         * during regular init, so that we can maintain settings across a
@@ -663,10 +701,11 @@ void i40e_ptp_init(struct i40e_pf *pf)
                dev_err(&pf->pdev->dev,
                        "PTP clock register failed: %ld\n", err);
        } else {
-               struct timespec ts;
+               struct timespec64 ts;
                u32 regval;
 
-               dev_info(&pf->pdev->dev, "PHC enabled\n");
+               if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+                       dev_info(&pf->pdev->dev, "PHC enabled\n");
                pf->flags |= I40E_FLAG_PTP;
 
                /* Ensure the clocks are running. */
@@ -684,8 +723,13 @@ void i40e_ptp_init(struct i40e_pf *pf)
                i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
 
                /* Set the clock value. */
+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
+               ts = ktime_to_timespec64(ktime_get_real());
+               i40e_ptp_settime64(&pf->ptp_caps, &ts);
+#else
                ts = ktime_to_timespec(ktime_get_real());
                i40e_ptp_settime(&pf->ptp_caps, &ts);
+#endif
        }
 }
 
similarity index 99%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_register.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_register.h
index 962e3416abf2708cc98db6c11d6d358b43f2ed77..00abf0f8629726fe0f5bd4e990ab85cb9b985098 100644 (file)
 #define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
 #define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
 #define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
 #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
 #define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
similarity index 87%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_txrx.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_txrx.c
index 7f1c718bf0c4e3c4a780e339b80960eeacb09960..0ecb43db978683c2a5fa6e3fbf9fa3e0fb604858 100644 (file)
@@ -164,9 +164,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_desc->cmd_type_offset_bsz =
                build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 
-       /* set the timestamp */
-       tx_buf->time_stamp = jiffies;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.
         */
@@ -282,7 +279,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
        if (add) {
                pf->fd_tcp_rule++;
                if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-                       dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                }
        } else {
@@ -290,7 +288,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                                  (pf->fd_tcp_rule - 1) : 0;
                if (pf->fd_tcp_rule == 0) {
                        pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-                       dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
                }
        }
 
@@ -464,11 +463,12 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
        error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
                I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 
-       if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+       if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+               pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
                if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
                    (I40E_DEBUG_FD & pf->hw.debug_mask))
                        dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
-                                rx_desc->wb.qword0.hi_dword.fd_id);
+                                pf->fd_inv);
 
                /* Check if the programming error is for ATR.
                 * If so, auto disable ATR and set a state for
@@ -501,13 +501,13 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
                        if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                          !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
-                               dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+                               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                       dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
                                pf->auto_disable_flags |=
                                                        I40E_FLAG_FD_SB_ENABLED;
                        }
                }
-       } else if (error ==
-                         (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+       } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
                if (I40E_DEBUG_FD & pf->hw.debug_mask)
                        dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
                                 rx_desc->wb.qword0.hi_dword.fd_id);
@@ -598,19 +598,6 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
        }
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring:  tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-       return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
@@ -618,7 +605,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
  **/
-static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
        u32 head, tail;
 
@@ -781,18 +768,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
 
-       /* check to see if there are any non-cache aligned descriptors
-        * waiting to be written back, and kick the hardware to force
-        * them to be written back in case of napi polling
-        */
-       if (budget &&
-           !((i & WB_STRIDE) == WB_STRIDE) &&
-           !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
-           (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
-               tx_ring->arm_wb = true;
-       else
-               tx_ring->arm_wb = false;
-
        if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -803,10 +778,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                         tx_ring->vsi->seid,
                         tx_ring->queue_index,
                         tx_ring->next_to_use, i);
-               dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-                        "  time_stamp           <%lx>\n"
-                        "  jiffies              <%lx>\n",
-                        tx_ring->tx_bi[i].time_stamp, jiffies);
 
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -852,7 +823,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
  * @q_vector: the vector  on which to force writeback
  *
  **/
-static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
        if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
                u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
@@ -876,12 +847,13 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 
        }
 }
-#ifndef I40E_LEGACY_INTERRUPT
 
 /**
  * i40e_set_new_dynamic_itr - Find new ITR level
  * @rc: structure containing ring performance data
  *
+ * Returns true if itr changed, false if not
+ *
  * Stores a new ITR value based on packets and byte counts during
  * the last interrupt.  The advantage of per interrupt computation
  * is faster updates and more accurate ITR for the current traffic
@@ -890,22 +862,33 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
  * testing data as well as attempting to minimize response time
  * while increasing bulk throughput.
  **/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 {
        enum i40e_latency_range new_latency_range = rc->latency_range;
+       struct i40e_q_vector *qv = rc->ring->q_vector;
        u32 new_itr = rc->itr;
        int bytes_per_int;
+       int usecs;
 
        if (rc->total_packets == 0 || !rc->itr)
-               return;
+               return false;
 
        /* simple throttlerate management
-        *   0-10MB/s   lowest (100000 ints/s)
+        *   0-10MB/s   lowest (50000 ints/s)
         *  10-20MB/s   low    (20000 ints/s)
-        *  20-1249MB/s bulk   (8000 ints/s)
+        *  20-1249MB/s bulk   (18000 ints/s)
+        *  > 40000 rx packets per second (8000 ints/s)
+        *
+        * The math works out because the divisor is in 10^(-6) which
+        * turns the bytes/us input value into MB/s values, but
+        * make sure to use usecs, as the register values written
+        * are in 2 usec increments in the ITR registers, and make sure
+        * to use the smoothed values that the countdown timer gives us.
         */
-       bytes_per_int = rc->total_bytes / rc->itr;
-       switch (rc->itr) {
+       usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+       bytes_per_int = rc->total_bytes / usecs;
+
+       switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                if (bytes_per_int > 10)
                        new_latency_range = I40E_LOW_LATENCY;
@@ -917,60 +900,53 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                        new_latency_range = I40E_LOWEST_LATENCY;
                break;
        case I40E_BULK_LATENCY:
+       case I40E_ULTRA_LATENCY:
+       default:
                if (bytes_per_int <= 20)
-                       rc->latency_range = I40E_LOW_LATENCY;
+                       new_latency_range = I40E_LOW_LATENCY;
                break;
        }
 
+       /* this is to adjust RX more aggressively when streaming small
+        * packets.  The value of 40000 was picked as it is just beyond
+        * what the hardware can receive per second if in low latency
+        * mode.
+        */
+#define RX_ULTRA_PACKET_RATE 40000
+
+       if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+           (&qv->rx == rc))
+               new_latency_range = I40E_ULTRA_LATENCY;
+
+       rc->latency_range = new_latency_range;
+
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
-               new_itr = I40E_ITR_100K;
+               new_itr = I40E_ITR_50K;
                break;
        case I40E_LOW_LATENCY:
                new_itr = I40E_ITR_20K;
                break;
        case I40E_BULK_LATENCY:
+               new_itr = I40E_ITR_18K;
+               break;
+       case I40E_ULTRA_LATENCY:
                new_itr = I40E_ITR_8K;
                break;
        default:
                break;
        }
 
-       if (new_itr != rc->itr) {
-               /* do an exponential smoothing */
-               new_itr = (10 * new_itr * rc->itr) /
-                         ((9 * new_itr) + rc->itr);
-               rc->itr = new_itr & I40E_MAX_ITR;
-       }
-
        rc->total_bytes = 0;
        rc->total_packets = 0;
-}
 
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-       u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-       struct i40e_hw *hw = &q_vector->vsi->back->hw;
-       u32 reg_addr;
-       u16 old_itr;
-
-       reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
-       old_itr = q_vector->rx.itr;
-       i40e_set_new_dynamic_itr(&q_vector->rx);
-       if (old_itr != q_vector->rx.itr)
-               wr32(hw, reg_addr, q_vector->rx.itr);
-
-       reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
-       old_itr = q_vector->tx.itr;
-       i40e_set_new_dynamic_itr(&q_vector->tx);
-       if (old_itr != q_vector->tx.itr)
-               wr32(hw, reg_addr, q_vector->tx.itr);
+       if (new_itr != rc->itr) {
+               rc->itr = new_itr;
+               return true;
+       }
+
+       return false;
 }
-#endif
 
 /**
  * i40e_clean_programming_status - clean the programming status descriptor
@@ -1182,9 +1158,9 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
        rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
        if (!rx_ring->rx_bi)
                goto err;
-
 #ifdef HAVE_NDO_GET_STATS64
-               u64_stats_init(rx_ring->syncp);
+
+       u64_stats_init(&rx_ring->syncp);
 #endif /* HAVE_NDO_GET_STATS64 */
 
        /* Round up to nearest 4K */
@@ -1444,7 +1420,9 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
        ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
                      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+#ifndef HAVE_SKBUFF_CSUM_LEVEL
        skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
+#endif
 #endif /* HAVE_VXLAN_RX_OFFLOAD */
 
        skb->ip_summed = CHECKSUM_NONE;
@@ -1459,7 +1437,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 #endif
 
        /* did the hardware decode the packet and checksum? */
-       if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+       if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
                return;
 
        /* both known and outer_ip must be set for the below code to work */
@@ -1474,8 +1452,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                ipv6 = true;
 
        if (ipv4 &&
-           (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-                        (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+           (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+                        BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
 #ifdef I40E_ADD_PROBES
        {
                vsi->back->rx_ip4_cso_err++;
@@ -1487,12 +1465,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 
        /* likely incorrect csum if alternate IP extension headers found */
        if (ipv6 &&
-           rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+           rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
                /* don't increment checksum err here, non-fatal err */
                return;
 
 #ifdef I40E_ADD_PROBES
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) {
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) {
                if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
                        vsi->back->rx_tcp_cso_err++;
                else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
@@ -1502,14 +1480,14 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        }
 #endif
        /* there was some L4 error, count error and punt packet to the stack */
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
                goto checksum_fail;
 
        /* handle packets that were not able to be checksummed due
         * to arrival speed, in this case the stack can compute
         * the csum.
         */
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
 #ifdef HAVE_VXLAN_RX_OFFLOAD
@@ -1519,23 +1497,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
         * so the total length of IPv4 header is IHL*4 bytes
         * The UDP_0 bit *may* bet set if the *inner* header is UDP
         */
-       if (ipv4_tunnel &&
-           (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
-           !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+       if (ipv4_tunnel) {
                i40e_set_transport_header(skb);
-
-               rx_udp_csum = udp_csum(skb);
-               iph = ip_hdr(skb);
-               csum = csum_tcpudp_magic(
-                               iph->saddr, iph->daddr,
-                               (skb->len - skb_transport_offset(skb)),
-                               IPPROTO_UDP, rx_udp_csum);
-
-               if (udp_hdr(skb)->check != csum)
-                       goto checksum_fail;
+               if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+                   (udp_hdr(skb)->check != 0)) {
+                       rx_udp_csum = udp_csum(skb);
+                       iph = ip_hdr(skb);
+                       csum = csum_tcpudp_magic(
+                                       iph->saddr, iph->daddr,
+                                       (skb->len - skb_transport_offset(skb)),
+                                       IPPROTO_UDP, rx_udp_csum);
+
+                       if (udp_hdr(skb)->check != csum)
+                               goto checksum_fail;
+
+               } /* else it's GRE and so no outer UDP header */
        }
-
-#endif
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
 #ifdef I40E_ADD_PROBES
        if ((decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
            (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4))
@@ -1549,6 +1527,9 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 #endif
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+#ifdef HAVE_SKBUFF_CSUM_LEVEL
+       skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+#endif
        return;
 
 checksum_fail:
@@ -1635,7 +1616,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;
 
-               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -1653,8 +1634,11 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                if (likely(!skb)) {
                        skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                        rx_ring->rx_hdr_len);
-                       if (!skb)
+                       if (!skb) {
                                rx_ring->rx_stats.alloc_buff_failed++;
+                               break;
+                       }
+
                        /* initialize queue mapping */
                        skb_record_rx_queue(skb, rx_ring->queue_index);
                        /* we are reusing so sync this buffer for CPU use */
@@ -1673,8 +1657,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                           I40E_RXD_QW1_ERROR_SHIFT;
-               rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1683,6 +1667,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                cleaned_count++;
                if (rx_hbo || rx_sph) {
                        int len;
+
                        if (rx_hbo)
                                len = I40E_RX_HDR_SIZE;
                        else
@@ -1726,7 +1711,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
-                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        struct i40e_rx_buffer *next_buffer;
 
                        next_buffer = &rx_ring->rx_bi[i];
@@ -1736,11 +1721,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
-                       /* TODO: shouldn't we increment a counter indicating the
-                        * drop?
-                        */
                        continue;
                }
 
@@ -1765,7 +1747,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
                i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
 #ifdef I40E_FCOE
@@ -1777,7 +1759,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-               rx_ring->netdev->last_rx = jiffies;
                rx_desc->wb.qword1.status_error_len = 0;
 
        } while (likely(total_rx_packets < budget));
@@ -1827,7 +1808,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;
 
-               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -1850,7 +1831,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                           I40E_RXD_QW1_ERROR_SHIFT;
-               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1868,17 +1849,14 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
-                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        rx_ring->rx_stats.non_eop_descs++;
                        continue;
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
-                       /* TODO: shouldn't we increment a counter indicating the
-                        * drop?
-                        */
                        continue;
                }
 
@@ -1903,7 +1881,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
                i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
 #ifdef I40E_FCOE
@@ -1915,8 +1893,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-               rx_ring->netdev->last_rx = jiffies;
-
                rx_desc->wb.qword1.status_error_len = 0;
        } while (likely(total_rx_packets < budget));
 
@@ -1931,6 +1907,96 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
        return total_rx_packets;
 }
 
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+       u32 val;
+
+       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+             (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+             (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+
+       return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_PFINT_DYN_CTLN
+
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+                                         struct i40e_q_vector *q_vector)
+{
+       struct i40e_hw *hw = &vsi->back->hw;
+       bool rx = false, tx = false;
+       u32 rxval, txval;
+       int vector;
+
+       vector = (q_vector->v_idx + vsi->base_vector);
+
+       /* avoid dynamic calculation if in countdown mode OR if
+        * all dynamic is disabled
+        */
+       rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+       if (q_vector->itr_countdown > 0 ||
+           (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+            !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+               goto enable_int;
+       }
+
+       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+               rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+               rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
+       }
+
+       if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+               tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+               txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
+       }
+
+       if (rx || tx) {
+               /* get the higher of the two ITR adjustments and
+                * use the same value for both ITR registers
+                * when in adaptive mode (rx and/or tx)
+                */
+               u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+
+               q_vector->tx.itr = q_vector->rx.itr = itr;
+               txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+               tx = true;
+               rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+               rx = true;
+       }
+
+       /* only need to enable the interrupt once, but need
+        * to possibly update both ITR values
+        */
+       if (rx) {
+               /* set the INTENA_MSK_MASK so that this first write
+                * won't actually enable the interrupt, instead just
+                * updating the ITR (it's bit 31 PF and VF)
+                */
+               rxval |= BIT(31);
+               /* don't check _DOWN because interrupt isn't being enabled */
+               wr32(hw, INTREG(vector - 1), rxval);
+       }
+
+enable_int:
+       if (!test_bit(__I40E_DOWN, &vsi->state))
+               wr32(hw, INTREG(vector - 1), txval);
+
+       if (q_vector->itr_countdown)
+               q_vector->itr_countdown--;
+       else
+               q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
+}
+
 /**
  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
@@ -1963,6 +2029,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        i40e_for_each_ring(ring, q_vector->tx) {
                clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
                arm_wb |= ring->arm_wb;
+               ring->arm_wb = false;
        }
 
        /* if i40e_busy_poll() has the vector or netpoll flag is set
@@ -2005,48 +2072,35 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
-#ifndef I40E_LEGACY_INTERRUPT
        if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-               if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-                   ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-                       i40e_update_dynamic_itr(q_vector);
-       }
-#endif
-       if (!test_bit(__I40E_DOWN, &vsi->state)) {
-               if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-                       i40e_irq_dynamic_enable(vsi,
-                                       q_vector->v_idx + vsi->base_vector);
-               } else {
-                       struct i40e_hw *hw = &vsi->back->hw;
-                       /* We re-enable the queue 0 cause, but
-                        * don't worry about dynamic_enable
-                        * because we left it on for the other
-                        * possible interrupts during napi
-                        */
-                       u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-                       qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-                       wr32(hw, I40E_QINT_RQCTL(0), qval);
-
-                       qval = rd32(hw, I40E_QINT_TQCTL(0));
-                       qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-                       wr32(hw, I40E_QINT_TQCTL(0), qval);
-
-                       i40e_irq_dynamic_enable_icr0(vsi->back);
-               }
+               i40e_update_enable_itr(vsi, q_vector);
+       } else { /* Legacy mode */
+               struct i40e_hw *hw = &vsi->back->hw;
+               /* We re-enable the queue 0 cause, but
+                * don't worry about dynamic_enable
+                * because we left it on for the other
+                * possible interrupts during napi
+                */
+               u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+
+               qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+               wr32(hw, I40E_QINT_RQCTL(0), qval);
+               qval = rd32(hw, I40E_QINT_TQCTL(0));
+               qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+               wr32(hw, I40E_QINT_TQCTL(0), qval);
+               i40e_irq_dynamic_enable_icr0(vsi->back);
        }
-
        return 0;
 }
-
 /**
  * i40e_atr - Add a Flow Director ATR filter
  * @tx_ring:  ring to add programming descriptor to
  * @skb:      send buffer
- * @flags:    send flags
+ * @tx_flags: send tx flags
  * @protocol: wire protocol
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                    u32 flags, __be16 protocol)
+                    u32 tx_flags, __be16 protocol)
 {
        struct i40e_filter_program_desc *fdir_desc;
        struct i40e_pf *pf = tx_ring->vsi->back;
@@ -2071,24 +2125,40 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (!tx_ring->atr_sample_rate)
                return;
 
-       /* snag network header to get L4 type and address */
-       hdr.network = skb_network_header(skb);
-
-       /* Currently only IPv4/IPv6 with TCP is supported */
-       if (protocol == htons(ETH_P_IP)) {
-               if (hdr.ipv4->protocol != IPPROTO_TCP)
-                       return;
+       if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+               return;
 
-               /* access ihl as a u8 to avoid unaligned access on ia64 */
-               hlen = (hdr.network[0] & 0x0F) << 2;
-       } else if (protocol == htons(ETH_P_IPV6)) {
-               if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+#ifdef HAVE_SKB_INNER_NETWORK_HEADER
+       if ((tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+               hdr.network = skb_inner_network_header(skb);
+               hlen = skb_inner_network_header_len(skb);
+       } else {
+#endif
+               /* snag network header to get L4 type and address */
+               hdr.network = skb_network_header(skb);
+
+               /* Currently only IPv4/IPv6 with TCP is supported */
+               /* access ihl as u8 to avoid unaligned access on ia64 */
+               if (tx_flags & I40E_TX_FLAGS_IPV4)
+                       hlen = (hdr.network[0] & 0x0F) << 2;
+               else if (protocol == htons(ETH_P_IPV6))
+                       hlen = sizeof(struct ipv6hdr);
+               else
                        return;
+#ifdef HAVE_SKB_INNER_NETWORK_HEADER
+       }
+#endif
 
-               hlen = sizeof(struct ipv6hdr);
-       } else {
+       /* Currently only IPv4/IPv6 with TCP is supported */
+       /* Note: tx_flags gets modified to reflect inner protocols in
+        * tx_enable_csum function if encap is enabled.
+        */
+       if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+           (hdr.ipv4->protocol != IPPROTO_TCP))
+               return;
+       else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+                  (hdr.ipv6->nexthdr != IPPROTO_TCP))
                return;
-       }
 
        th = (struct tcphdr *)(hdr.network + hlen);
 
@@ -2139,9 +2209,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
 
        dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
-       dtype_cmd |=
-               ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
-               I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+       if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+               dtype_cmd |=
+                       ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+       else
+               dtype_cmd |=
+                       ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
        fdir_desc->rsvd = cpu_to_le32(0);
@@ -2162,25 +2239,43 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * otherwise  returns 0 to indicate the flags has been set properly.
  **/
 #ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                              struct i40e_ring *tx_ring,
-                              u32 *flags)
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                     struct i40e_ring *tx_ring, u32 *flags)
 #else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                                     struct i40e_ring *tx_ring,
-                                     u32 *flags)
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                            struct i40e_ring *tx_ring,
+                                            u32 *flags)
 #endif
 {
        __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
 
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+       if (protocol == htons(ETH_P_8021Q) &&
+           !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+#else
+       if (protocol == htons(ETH_P_8021Q) &&
+           !(tx_ring->netdev->features & NETIF_F_HW_VLAN_TX)) {
+#endif
+               /* When HW VLAN acceleration is turned off by the user the
+                * stack sets the protocol to 8021q so that the driver
+                * can take any steps required to support the SW only
+                * VLAN handling.  In our case the driver doesn't need
+                * to take any further steps so just set the protocol
+                * to the encapsulated ethertype.
+                */
+               skb->protocol = vlan_get_protocol(skb);
+               goto out;
+       }
+
        /* if we have a HW VLAN tag being added, default to the HW one */
-       if (vlan_tx_tag_present(skb)) {
-               tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= I40E_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN, check the next protocol and store the tag */
        } else if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_hdr *vhdr, _vhdr;
+
                vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
                if (!vhdr)
                        return -EINVAL;
@@ -2201,6 +2296,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
                if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
                        struct vlan_ethhdr *vhdr;
+
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                                return -ENOMEM;
@@ -2221,16 +2317,14 @@ out:
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                   u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                   u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                   u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                   u32 *cd_tunneling)
 {
        u32 cd_cmd, cd_tso_len, cd_mss;
        struct tcphdr *tcph;
@@ -2359,12 +2453,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                u32 *td_cmd, u32 *td_offset,
                                struct i40e_ring *tx_ring,
                                u32 *cd_tunneling)
@@ -2374,13 +2468,14 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
        struct iphdr *this_ip_hdr;
        u32 network_hdr_len;
        u8 l4_hdr = 0;
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
        u32 l4_tunnel = 0;
 
-#ifdef HAVE_ENCAP_CSUM_OFFLOAD
        if (skb->encapsulation) {
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                       *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
                default:
                        return;
@@ -2390,20 +2485,20 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                this_ipv6_hdr = inner_ipv6_hdr(skb);
                this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-               if (tx_flags & I40E_TX_FLAGS_IPV4) {
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
+               if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                       if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                ip_hdr(skb)->check = 0;
 #ifdef I40E_ADD_PROBES
-                       tx_ring->vsi->back->tx_ip4_cso++;
+                               tx_ring->vsi->back->tx_ip4_cso++;
 #endif
                        } else {
                                *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
-               } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+               } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-                       if (tx_flags & I40E_TX_FLAGS_TSO)
+                       if (*tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
                }
 
@@ -2415,8 +2510,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                if (this_ip_hdr->version == 6) {
-                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
-                       tx_flags |= I40E_TX_FLAGS_IPV6;
+                       *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       *tx_flags |= I40E_TX_FLAGS_IPV6;
                }
 
        } else {
@@ -2433,12 +2528,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 #endif /* HAVE_ENCAP_CSUM_OFFLOAD */
 
        /* Enable IP checksum offloads */
-       if (tx_flags & I40E_TX_FLAGS_IPV4) {
+       if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                l4_hdr = this_ip_hdr->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
-               if (tx_flags & I40E_TX_FLAGS_TSO) {
+               if (*tx_flags & I40E_TX_FLAGS_TSO) {
                        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                        this_ip_hdr->check = 0;
 #ifdef I40E_ADD_PROBES
@@ -2451,7 +2546,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                *td_offset = (network_hdr_len >> 2) <<
                              I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
 #ifdef NETIF_F_IPV6_CSUM
-       } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+       } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                l4_hdr = this_ipv6_hdr->nexthdr;
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                /* Now set the td_offset for IP header length */
@@ -2534,14 +2629,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
  * @skb:      send buffer
  * @tx_flags: collected send information
- * @hdr_len:  size of the packet header
  *
  * Note: Our HW can't scatter-gather more than 8 fragments to build
  * a packet on the wire and so we need to figure out the cases where we
  * need to linearize the skb.
  **/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-                               const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
 {
        struct skb_frag_struct *frag;
        bool linearize = false;
@@ -2553,11 +2646,8 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
        gso_segs = skb_shinfo(skb)->gso_segs;
 
        if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-               u16 j = 1;
+               u16 j = 0;
 
-#ifdef I40E_ADD_PROBES
-               tx_ring->vsi->back->tcp_segs += gso_segs;
-#endif
                if (num_frags < (I40E_MAX_BUFFER_TXD))
                        goto linearize_chk_done;
                /* try the simple math, if we have too many frags per segment */
@@ -2567,22 +2657,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
                        goto linearize_chk_done;
                }
                frag = &skb_shinfo(skb)->frags[0];
-               size = hdr_len;
                /* we might still have more fragments per segment */
                do {
                        size += skb_frag_size(frag);
                        frag++; j++;
+                       if ((size >= skb_shinfo(skb)->gso_size) &&
+                           (j < I40E_MAX_BUFFER_TXD)) {
+                               size = (size % skb_shinfo(skb)->gso_size);
+                               j = (size) ? 1 : 0;
+                       }
                        if (j == I40E_MAX_BUFFER_TXD) {
-                               if (size < skb_shinfo(skb)->gso_size) {
-                                       linearize = true;
-                                       break;
-                               } else {
-                                       j = 1;
-                                       size -= skb_shinfo(skb)->gso_size;
-                                       if (size)
-                                               j++;
-                                       size += hdr_len;
-                               }
+                               linearize = true;
+                               break;
                        }
                        num_frags--;
                } while (num_frags);
@@ -2595,6 +2681,47 @@ linearize_chk_done:
        return linearize;
 }
 
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       /* Memory barrier before checking head and tail */
+       smp_mb();
+
+       /* Check again in a case another CPU has just made room available. */
+       if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+               return -EBUSY;
+
+       /* A reprieve! - use start_queue because it doesn't call schedule */
+       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       ++tx_ring->tx_stats.restart_queue;
+       return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+#ifdef I40E_FCOE
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#else
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#endif
+{
+       if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+               return 0;
+       return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
@@ -2606,13 +2733,13 @@ linearize_chk_done:
  * @td_offset: offset for checksum or crc
  **/
 #ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                struct i40e_tx_buffer *first, u32 tx_flags,
-                const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        struct i40e_tx_buffer *first, u32 tx_flags,
                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                              struct i40e_tx_buffer *first, u32 tx_flags,
+                              const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #endif
 {
        struct skb_frag_struct *frag;
@@ -2631,12 +2758,16 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                         I40E_TX_FLAGS_VLAN_SHIFT;
        }
 
-       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
                gso_segs = skb_shinfo(skb)->gso_segs;
-       } else {
+       else
                gso_segs = 1;
-       }
 
+#ifdef I40E_ADD_PROBES
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+               tx_ring->vsi->back->tcp_segs += gso_segs;
+
+#endif
        data_len = skb->data_len;
        size = skb_headlen(skb);
 
@@ -2707,27 +2838,35 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
        /* Place RS bit on last descriptor of any packet that spans across the
         * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
         */
-       if (((i & WB_STRIDE) != WB_STRIDE) &&
+#ifdef HAVE_SKB_XMIT_MORE
+       if (skb->xmit_more  &&
+           ((tx_ring->packet_stride & WB_STRIDE) != WB_STRIDE) &&
            (first <= &tx_ring->tx_bi[i]) &&
            (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+               tx_ring->packet_stride++;
                tx_desc->cmd_type_offset_bsz =
                        build_ctob(td_cmd, td_offset, size, td_tag) |
                        cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
                                         I40E_TXD_QW1_CMD_SHIFT);
        } else {
+               tx_ring->packet_stride = 0;
                tx_desc->cmd_type_offset_bsz =
                        build_ctob(td_cmd, td_offset, size, td_tag) |
                        cpu_to_le64((u64)I40E_TXD_CMD <<
                                         I40E_TXD_QW1_CMD_SHIFT);
        }
+#else
+       tx_desc->cmd_type_offset_bsz =
+               build_ctob(td_cmd, td_offset, size, td_tag) |
+               cpu_to_le64((u64)I40E_TXD_CMD <<
+                                I40E_TXD_QW1_CMD_SHIFT);
+
+#endif
 
        netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                 tx_ring->queue_index),
                             first->bytecount);
 
-       /* set the timestamp */
-       first->time_stamp = jiffies;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -2744,8 +2883,18 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
        tx_ring->next_to_use = i;
 
+       i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
        /* notify HW of packet */
+#ifdef HAVE_SKB_XMIT_MORE
+       if (!skb->xmit_more ||
+           netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                                                  tx_ring->queue_index)))
+               writel(i, tx_ring->tail);
+       else
+               prefetchw(tx_desc + 1);
+#else
        writel(i, tx_ring->tail);
+#endif /* HAVE_SKB_XMIT_MORE */
 
        return;
 
@@ -2766,47 +2915,6 @@ dma_error:
        tx_ring->next_to_use = i;
 }
 
-/**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-       /* Memory barrier before checking head and tail */
-       smp_mb();
-
-       /* Check again in a case another CPU has just made room available. */
-       if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-               return -EBUSY;
-
-       /* A reprieve! - use start_queue because it doesn't call schedule */
-       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-       ++tx_ring->tx_stats.restart_queue;
-       return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-#ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#endif
-{
-       if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-               return 0;
-       return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
 /**
  * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb:     send buffer
@@ -2817,11 +2925,11 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * one descriptor.
  **/
 #ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                              struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+                                            struct i40e_ring *tx_ring)
 #endif
 {
        unsigned int f;
@@ -2880,6 +2988,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        int tsyn;
 #endif /* HAVE_PTP_1588_CLOCK */
        int tso;
+
        if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
                return NETDEV_TX_BUSY;
 
@@ -2899,7 +3008,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
 
-       tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+       tso = i40e_tso(tx_ring, skb, &hdr_len,
                       &cd_type_cmd_tso_mss, &cd_tunneling);
 
        if (tso < 0)
@@ -2907,10 +3016,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
 
-       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+       if (i40e_chk_linearize(skb, tx_flags)) {
                if (skb_linearize(skb))
                        goto out_drop;
-
+               tx_ring->tx_stats.tx_linearize++;
+       }
        skb_tx_timestamp(skb);
 
 #ifdef HAVE_PTP_1588_CLOCK
@@ -2927,7 +3037,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_flags |= I40E_TX_FLAGS_CSUM;
 
-               i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+               i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);
        }
 
@@ -2946,8 +3056,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 #ifndef HAVE_TRANS_START_IN_QUEUE
        tx_ring->netdev->trans_start = jiffies;
 #endif
-       i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
        return NETDEV_TX_OK;
 
 out_drop:
similarity index 79%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_txrx.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_txrx.h
index 1add829d7ea69451bb13436b853c26ee29ff16e8..dc4ed1da74debb1e72624b378be56aa7bc8a64b8 100644 (file)
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
 #define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
 #define I40E_ITR_100K              0x0005
+#define I40E_ITR_50K               0x000A
 #define I40E_ITR_20K               0x0019
+#define I40E_ITR_18K               0x001B
 #define I40E_ITR_8K                0x003E
 #define I40E_ITR_4K                0x007A
-#define I40E_ITR_RX_DEF            I40E_ITR_8K
-#define I40E_ITR_TX_DEF            I40E_ITR_4K
+#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
+#define I40E_ITR_RX_DEF            I40E_ITR_20K
+#define I40E_ITR_TX_DEF            I40E_ITR_20K
 #define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
 #define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
 #define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
 #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
 #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
 #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA                  BIT(6)
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K              125     /* 8000 ints/sec */
+#define I40E_INTRL_62K             16      /* 62500 ints/sec */
+#define I40E_INTRL_83K             12      /* 83333 ints/sec */
 
 #define I40E_QUEUE_END_OF_LIST 0x7FF
 
@@ -66,17 +78,19 @@ enum i40e_dyn_idx_t {
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define i40e_pf_get_default_rss_hena(pf) I40E_DEFAULT_RSS_HENA
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,18 +143,19 @@ enum i40e_dyn_idx_t {
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING   4
 
-#define I40E_TX_FLAGS_CSUM             (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN          (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN          (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO              (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4             (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6             (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
+#define I40E_TX_FLAGS_CSUM             BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN          BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN          BIT(2)
+#define I40E_TX_FLAGS_TSO              BIT(3)
+#define I40E_TX_FLAGS_IPV4             BIT(4)
+#define I40E_TX_FLAGS_IPV6             BIT(5)
+#define I40E_TX_FLAGS_FCCRC            BIT(6)
+#define I40E_TX_FLAGS_FSO              BIT(7)
 #ifdef HAVE_PTP_1588_CLOCK
-#define I40E_TX_FLAGS_TSYN             (u32)(1 << 8)
+#define I40E_TX_FLAGS_TSYN             BIT(8)
 #endif /* HAVE_PTP_1588_CLOCK */
-#define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
+#define I40E_TX_FLAGS_FD_SB            BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -148,13 +163,13 @@ enum i40e_dyn_idx_t {
 
 struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
-       unsigned long time_stamp;
        union {
                struct sk_buff *skb;
                void *raw_buf;
        };
        unsigned int bytecount;
        unsigned short gso_segs;
+
        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
@@ -178,6 +193,7 @@ struct i40e_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
        u64 tx_done_old;
+       u64 tx_linearize;
 };
 
 struct i40e_rx_queue_stats {
@@ -256,6 +272,7 @@ struct i40e_ring {
 #endif /* HAVE_PTP_1588_CLOCK */
        bool ring_active;               /* is ring online or not */
        bool arm_wb;            /* do something to arm write back */
+       u16  packet_stride;
 
        /* stats structs */
        struct i40e_queue_stats stats;
@@ -280,6 +297,7 @@ enum i40e_latency_range {
        I40E_LOWEST_LATENCY = 0,
        I40E_LOW_LATENCY = 1,
        I40E_BULK_LATENCY = 2,
+       I40E_ULTRA_LATENCY = 3,
 };
 
 struct i40e_ring_container {
@@ -353,12 +371,27 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring);
 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
 #ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                struct i40e_tx_buffer *first, u32 tx_flags,
-                const u8 hdr_len, u32 td_cmd, u32 td_offset);
+void i40e_tx_map(struct i40e_ring *, struct sk_buff *, struct i40e_tx_buffer *,
+                u32, const u8, u32, u32);
 int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
-int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                              struct i40e_ring *tx_ring, u32 *flags);
+int i40e_xmit_descriptor_count(struct sk_buff *, struct i40e_ring *);
+int i40e_tx_prepare_vlan_flags(struct sk_buff *, struct i40e_ring *,
+                              u32 *flags);
 #endif
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
+u32 i40e_get_tx_pending(struct i40e_ring *ring);
+
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+       return le32_to_cpu(*(volatile __le32 *)head);
+}
 #endif /* _I40E_TXRX_H_ */
similarity index 91%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_type.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_type.h
index f0b1454af2e49cff599de7d2cccb4495cd26fcf9..3e04d4e9f342b62ce68027c61fff3b0ccecaad12 100644 (file)
 #include "i40e_adminq.h"
 #include "i40e_hmc.h"
 #include "i40e_lan_hmc.h"
+#include "i40e_devids.h"
 
 #define UNREFERENCED_XPARAMETER
 
-/* Vendor ID */
-#define I40E_INTEL_VENDOR_ID           0x8086
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710          0x1572
-#define I40E_DEV_ID_QEMU               0x1574
-#define I40E_DEV_ID_KX_A               0x157F
-#define I40E_DEV_ID_KX_B               0x1580
-#define I40E_DEV_ID_KX_C               0x1581
-#define I40E_DEV_ID_QSFP_A             0x1583
-#define I40E_DEV_ID_QSFP_B             0x1584
-#define I40E_DEV_ID_QSFP_C             0x1585
-#define I40E_DEV_ID_10G_BASE_T         0x1586
-#define I40E_DEV_ID_20G_KR2            0x1587
-#define I40E_DEV_ID_VF                 0x154C
-#define I40E_DEV_ID_VF_HV              0x1571
-
-#define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
-                                        (d) == I40E_DEV_ID_QSFP_B  || \
-                                        (d) == I40E_DEV_ID_QSFP_C)
-
 /* I40E_MASK is a macro used on 32 bit registers */
 #define I40E_MASK(mask, shift) (mask << shift)
 
@@ -207,14 +187,14 @@ enum i40e_set_fc_aq_failures {
 };
 
 enum i40e_vsi_type {
-       I40E_VSI_MAIN = 0,
-       I40E_VSI_VMDQ1,
-       I40E_VSI_VMDQ2,
-       I40E_VSI_CTRL,
-       I40E_VSI_FCOE,
-       I40E_VSI_MIRROR,
-       I40E_VSI_SRIOV,
-       I40E_VSI_FDIR,
+       I40E_VSI_MAIN   = 0,
+       I40E_VSI_VMDQ1  = 1,
+       I40E_VSI_VMDQ2  = 2,
+       I40E_VSI_CTRL   = 3,
+       I40E_VSI_FCOE   = 4,
+       I40E_VSI_MIRROR = 5,
+       I40E_VSI_SRIOV  = 6,
+       I40E_VSI_FDIR   = 7,
        I40E_VSI_TYPE_UNKNOWN
 };
 
@@ -238,16 +218,64 @@ struct i40e_link_status {
        bool crc_enable;
        u8 pacing;
        u8 requested_speeds;
+       u8 module_type[3];
+       /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP           0x03
+#define I40E_MODULE_TYPE_QSFP          0x0D
+       /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE    0x01
+#define I40E_MODULE_TYPE_40G_LR4       0x02
+#define I40E_MODULE_TYPE_40G_SR4       0x04
+#define I40E_MODULE_TYPE_40G_CR4       0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR   0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR   0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM  0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER   0x80
+       /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX   0x01
+#define I40E_MODULE_TYPE_1000BASE_LX   0x02
+#define I40E_MODULE_TYPE_1000BASE_CX   0x04
+#define I40E_MODULE_TYPE_1000BASE_T    0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+       I40E_CAP_PHY_TYPE_SGMII                 = BIT(I40E_PHY_TYPE_SGMII),
+       I40E_CAP_PHY_TYPE_1000BASE_KX           = BIT(I40E_PHY_TYPE_1000BASE_KX),
+       I40E_CAP_PHY_TYPE_10GBASE_KX4           = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+       I40E_CAP_PHY_TYPE_10GBASE_KR            = BIT(I40E_PHY_TYPE_10GBASE_KR),
+       I40E_CAP_PHY_TYPE_40GBASE_KR4           = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+       I40E_CAP_PHY_TYPE_XAUI                  = BIT(I40E_PHY_TYPE_XAUI),
+       I40E_CAP_PHY_TYPE_XFI                   = BIT(I40E_PHY_TYPE_XFI),
+       I40E_CAP_PHY_TYPE_SFI                   = BIT(I40E_PHY_TYPE_SFI),
+       I40E_CAP_PHY_TYPE_XLAUI                 = BIT(I40E_PHY_TYPE_XLAUI),
+       I40E_CAP_PHY_TYPE_XLPPI                 = BIT(I40E_PHY_TYPE_XLPPI),
+       I40E_CAP_PHY_TYPE_40GBASE_CR4_CU        = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_CR1_CU        = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_AOC           = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+       I40E_CAP_PHY_TYPE_40GBASE_AOC           = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+       I40E_CAP_PHY_TYPE_100BASE_TX            = BIT(I40E_PHY_TYPE_100BASE_TX),
+       I40E_CAP_PHY_TYPE_1000BASE_T            = BIT(I40E_PHY_TYPE_1000BASE_T),
+       I40E_CAP_PHY_TYPE_10GBASE_T             = BIT(I40E_PHY_TYPE_10GBASE_T),
+       I40E_CAP_PHY_TYPE_10GBASE_SR            = BIT(I40E_PHY_TYPE_10GBASE_SR),
+       I40E_CAP_PHY_TYPE_10GBASE_LR            = BIT(I40E_PHY_TYPE_10GBASE_LR),
+       I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU       = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_CR1           = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+       I40E_CAP_PHY_TYPE_40GBASE_CR4           = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+       I40E_CAP_PHY_TYPE_40GBASE_SR4           = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+       I40E_CAP_PHY_TYPE_40GBASE_LR4           = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+       I40E_CAP_PHY_TYPE_1000BASE_SX           = BIT(I40E_PHY_TYPE_1000BASE_SX),
+       I40E_CAP_PHY_TYPE_1000BASE_LX           = BIT(I40E_PHY_TYPE_1000BASE_LX),
+       I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL    = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+       I40E_CAP_PHY_TYPE_20GBASE_KR2           = BIT(I40E_PHY_TYPE_20GBASE_KR2)
 };
 
 struct i40e_phy_info {
        struct i40e_link_status link_info;
        struct i40e_link_status link_info_old;
-       u32 autoneg_advertised;
-       u32 phy_id;
-       u32 module_type;
        bool get_link_info;
        enum i40e_media_type media_type;
+       /* all the phy types the NVM is capable of */
+       u32 phy_types;
 };
 
 #define I40E_HW_CAP_MAX_GPIO                   30
@@ -272,7 +300,17 @@ struct i40e_hw_capabilities {
        bool dcb;
        bool fcoe;
        bool iscsi; /* Indicates iSCSI enabled */
-       bool mfp_mode_1;
+       bool flex10_enable;
+       bool flex10_capable;
+       u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN       0x0
+#define I40E_FLEX10_MODE_DCC           0x1
+#define I40E_FLEX10_MODE_DCI           0x2
+
+       u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR   0x1
+#define I40E_FLEX10_STATUS_VC_MODE     0x2
+
        bool mgmt_cem;
        bool ieee_1588;
        bool iwarp;
@@ -301,6 +339,7 @@ struct i40e_hw_capabilities {
        u8 rx_buf_chain_len;
        u32 enabled_tcmap;
        u32 maxtc;
+       u64 wr_csr_prot;
 };
 
 struct i40e_mac_info {
@@ -328,6 +367,7 @@ struct i40e_nvm_info {
        bool blank_nvm_mode;      /* is NVM empty (no FW present)*/
        u16 version;              /* NVM package version */
        u32 eetrack;              /* NVM data version */
+       u32 oem_ver;              /* OEM version info */
 };
 
 /* definitions used in NVM update support */
@@ -346,12 +386,17 @@ enum i40e_nvmupd_cmd {
        I40E_NVMUPD_CSUM_CON,
        I40E_NVMUPD_CSUM_SA,
        I40E_NVMUPD_CSUM_LCB,
+       I40E_NVMUPD_STATUS,
+       I40E_NVMUPD_EXEC_AQ,
+       I40E_NVMUPD_GET_AQ_RESULT,
 };
 
 enum i40e_nvmupd_state {
        I40E_NVMUPD_STATE_INIT,
        I40E_NVMUPD_STATE_READING,
-       I40E_NVMUPD_STATE_WRITING
+       I40E_NVMUPD_STATE_WRITING,
+       I40E_NVMUPD_STATE_INIT_WAIT,
+       I40E_NVMUPD_STATE_WRITE_WAIT,
 };
 
 /* nvm_access definition and its masks/shifts need to be accessible to
@@ -370,6 +415,7 @@ enum i40e_nvmupd_state {
 #define I40E_NVM_SA            (I40E_NVM_SNT | I40E_NVM_LCB)
 #define I40E_NVM_ERA           0x4
 #define I40E_NVM_CSUM          0x8
+#define I40E_NVM_EXEC          0xf
 
 #define I40E_NVM_ADAPT_SHIFT   16
 #define I40E_NVM_ADAPT_MASK    (0xffffULL << I40E_NVM_ADAPT_SHIFT)
@@ -450,6 +496,8 @@ struct i40e_fc_info {
 #define I40E_APP_PROTOID_FIP           0x8914
 #define I40E_APP_SEL_ETHTYPE           0x1
 #define I40E_APP_SEL_TCPIP             0x2
+#define I40E_CEE_APP_SEL_ETHTYPE       0x0
+#define I40E_CEE_APP_SEL_TCPIP         0x1
 
 /* CEE or IEEE 802.1Qaz ETS Configuration data */
 struct i40e_dcb_ets_config {
@@ -480,7 +528,10 @@ struct i40e_dcbx_config {
        u8  dcbx_mode;
 #define I40E_DCBX_MODE_CEE     0x1
 #define I40E_DCBX_MODE_IEEE    0x2
+       u8  app_mode;
+#define I40E_DCBX_APPS_NON_WILLING     0x1
        u32 numapps;
+       u32 tlv_status; /* CEE mode TLV status */
        struct i40e_dcb_ets_config etscfg;
        struct i40e_dcb_ets_config etsrec;
        struct i40e_dcb_pfc_config pfc;
@@ -532,6 +583,8 @@ struct i40e_hw {
 
        /* state of nvm update process */
        enum i40e_nvmupd_state nvmupd_state;
+       struct i40e_aq_desc nvm_wb_desc;
+       struct i40e_virt_mem nvm_buff;
 
        /* HMC info */
        struct i40e_hmc_info hmc; /* HMC info struct */
@@ -540,14 +593,16 @@ struct i40e_hw {
        u16 dcbx_status;
 
        /* DCBX info */
-       struct i40e_dcbx_config local_dcbx_config;
-       struct i40e_dcbx_config remote_dcbx_config;
+       struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+       struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+       struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
 
        /* debug mask */
        u32 debug_mask;
+       char err_str[16];
 };
 
-static inline bool i40e_is_vf(struct i40e_hw *hw)
+static INLINE bool i40e_is_vf(struct i40e_hw *hw)
 {
        return hw->mac.type == I40E_MAC_VF;
 }
@@ -666,7 +721,7 @@ enum i40e_rx_desc_status_bits {
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT      0
-#define I40E_RXD_QW1_STATUS_MASK       (((1 << I40E_RX_DESC_STATUS_LAST) - 1) << \
+#define I40E_RXD_QW1_STATUS_MASK       ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \
                                         I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -674,8 +729,7 @@ enum i40e_rx_desc_status_bits {
                                             I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK     (0x1UL << \
-                                        I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK   BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT      I40E_RX_DESC_STATUS_UMBCAST
 #define I40E_RXD_QW1_STATUS_UMBCAST_MASK       (0x3UL << \
@@ -821,8 +875,7 @@ enum i40e_rx_ptype_payload_layer {
                                         I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT  63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK   (0x1ULL << \
-                                        I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK   BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 #define I40E_RXD_QW1_NEXTP_SHIFT       38
 #define I40E_RXD_QW1_NEXTP_MASK                (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
@@ -1025,12 +1078,11 @@ enum i40e_tx_ctx_desc_eipt_offload {
 #define I40E_TXD_CTX_QW0_NATT_SHIFT    9
 #define I40E_TXD_CTX_QW0_NATT_MASK     (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING     (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING     BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING     (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT       11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK        (0x1ULL << \
-                                        I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK        BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST      I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -1135,8 +1187,7 @@ enum i40e_filter_program_desc_pcmd {
 #define I40E_TXD_FLTR_QW1_DEST_MASK    (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT        (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
-                                        I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT      (0x9ULL + \
                                                 I40E_TXD_FLTR_QW1_CMD_SHIFT)
@@ -1267,6 +1318,9 @@ struct i40e_hw_port_stats {
        /* flow director stats */
        u64 fd_atr_match;
        u64 fd_sb_match;
+       u64 fd_atr_tunnel_match;
+       u32 fd_atr_status;
+       u32 fd_sb_status;
        /* EEE LPI */
        u32 tx_lpi_status;
        u32 rx_lpi_status;
@@ -1292,6 +1346,7 @@ struct i40e_hw_port_stats {
 #define I40E_SR_PBA_FLAGS                      0x15
 #define I40E_SR_PBA_BLOCK_PTR                  0x16
 #define I40E_SR_BOOT_CONFIG_PTR                        0x17
+#define I40E_NVM_OEM_VER_OFF                   0x83
 #define I40E_SR_NVM_DEV_STARTER_VERSION                0x18
 #define I40E_SR_NVM_WAKE_ON_LAN                        0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
similarity index 94%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_virtchnl.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl.h
index d7455caa92931320ca121aa8be53709ce49242bc..ef78a917d253c2a660c4400b93a44b54f9d23494 100644 (file)
@@ -81,7 +81,6 @@ enum i40e_virtchnl_ops {
        I40E_VIRTCHNL_OP_GET_STATS = 15,
        I40E_VIRTCHNL_OP_FCOE = 16,
        I40E_VIRTCHNL_OP_EVENT = 17,
-       I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
@@ -110,7 +109,9 @@ struct i40e_virtchnl_msg {
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR            1
-#define I40E_VIRTCHNL_VERSION_MINOR            0
+#define I40E_VIRTCHNL_VERSION_MINOR            1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
 struct i40e_virtchnl_version_info {
        u32 major;
        u32 minor;
@@ -129,7 +130,8 @@ struct i40e_virtchnl_version_info {
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,10 +145,14 @@ struct i40e_virtchnl_vsi_resource {
        u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2    0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE  0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN  0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2            0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP         0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE          0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ                0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG       0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR     0x00000020
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING    0x00020000
 
 struct i40e_virtchnl_vf_resource {
        u16 num_vsis;
similarity index 88%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_virtchnl_pf.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.c
index a0da76d79cf85aa9b7d15c7984a8986f904e8498..3af1ad9c4418b72a7ad5e4d0c644d36fa8c1ad4c 100644 (file)
 
 #include "i40e.h"
 
+/*********************notification routines***********************/
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+                                enum i40e_virtchnl_ops v_opcode,
+                                i40e_status v_retval, u8 *msg,
+                                u16 msglen)
+{
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_vf *vf = pf->vf;
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+               /* Not all vfs are enabled so skip the ones that are not */
+               if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+                   !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+                       continue;
+
+               /* Ignore return value on purpose - a given VF may fail, but
+                * we need to keep going and send to all of them
+                */
+               i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
+                                      msg, msglen, NULL);
+       }
+}
+
+/**
+ * i40e_vc_notify_vf_link_state
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ **/
+static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
+{
+       struct i40e_virtchnl_pf_event pfe;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_link_status *ls = &pf->hw.phy.link_info;
+       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+       pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+       pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+       if (vf->link_forced) {
+               pfe.event_data.link_event.link_status = vf->link_up;
+               pfe.event_data.link_event.link_speed =
+                       (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+       } else {
+#endif
+               pfe.event_data.link_event.link_status =
+                       ls->link_info & I40E_AQ_LINK_UP;
+               pfe.event_data.link_event.link_speed = ls->link_speed;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+       }
+#endif
+       i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+                              I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the PF structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vfs; i++)
+               i40e_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+       struct i40e_virtchnl_pf_event pfe;
+
+       pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+       pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+       i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+                            (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the VF structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+       struct i40e_virtchnl_pf_event pfe;
+       int abs_vf_id;
+
+       /* validate the request */
+       if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+               return;
+
+       /* verify if the VF is in either init or active before proceeding */
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+           !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+               return;
+
+       abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+
+       pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+       pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+       i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+                              I40E_SUCCESS, (u8 *)&pfe,
+                              sizeof(struct i40e_virtchnl_pf_event), NULL);
+}
 /***********************misc routines*****************************/
 
 /**
  **/
 static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
 {
-       struct i40e_hw *hw = &pf->hw;
-       u32 reg;
-
-       reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
-       reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-       wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
-       i40e_flush(hw);
+       i40e_vc_notify_vf_reset(vf);
+       i40e_reset_vf(vf, false);
 }
 
 /**
@@ -53,11 +175,12 @@ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
  *
  * check for the valid VSI id
  **/
-static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
 {
        struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 
-       return pf->vsi[vsi_id]->vf_id == vf->vf_id;
+       return (vsi && (vsi->vf_id == vf->vf_id));
 }
 
 /**
@@ -68,12 +191,13 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
  *
  * check for the valid queue id
  **/
-static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
                                            u8 qid)
 {
        struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 
-       return qid < pf->vsi[vsi_id]->alloc_queue_pairs;
+       return (vsi && (qid < vsi->alloc_queue_pairs));
 }
 
 /**
@@ -95,16 +219,16 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
 /**
  * i40e_vc_get_pf_queue_id
  * @vf: pointer to the VF info
- * @vsi_idx: index of VSI in PF struct
+ * @vsi_id: id of VSI as provided by the FW
  * @vsi_queue_id: vsi relative queue id
  *
  * return PF relative queue id
  **/
-static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
                                   u8 vsi_queue_id)
 {
        struct i40e_pf *pf = vf->pf;
-       struct i40e_vsi *vsi = pf->vsi[vsi_idx];
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
        u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
 
        if (le16_to_cpu(vsi->info.mapping_flags) &
@@ -121,12 +245,12 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
 /**
  * i40e_config_irq_link_list
  * @vf: pointer to the VF info
- * @vsi_idx: index of VSI in PF struct
+ * @vsi_id: id of VSI as given by the FW
  * @vecmap: irq map info
  *
  * configure irq link list from the map
  **/
-static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
                                      struct i40e_virtchnl_vector_map *vecmap)
 {
        unsigned long linklistmap = 0, tempmap;
@@ -171,7 +295,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                                 I40E_VIRTCHNL_SUPPORTED_QTYPES));
        vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
        qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
-       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
        reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 
        wr32(hw, reg_idx, reg);
@@ -198,7 +322,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
                        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
                        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
-                       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
+                       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
                                                              vsi_queue_id);
                } else {
                        pf_queue_id = I40E_QUEUE_END_OF_LIST;
@@ -209,11 +333,23 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                reg = (vector_id) |
                    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
-                   (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+                   BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
                    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
                wr32(hw, reg_idx, reg);
        }
 
+       /* if the vf is running in polling mode and using interrupt zero,
+        * need to disable auto-mask on enabling zero interrupt for VFs.
+        */
+       if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
+           (vector_id == 0)) {
+               reg = rd32(hw, I40E_GLINT_CTL);
+               if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
+                       reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+                       wr32(hw, I40E_GLINT_CTL, reg);
+               }
+       }
+
 irq_list_done:
        i40e_flush(hw);
 }
@@ -221,24 +357,26 @@ irq_list_done:
 /**
  * i40e_config_vsi_tx_queue
  * @vf: pointer to the VF info
- * @vsi_idx: index of VSI in PF struct
+ * @vsi_id: id of VSI as provided by the FW
  * @vsi_queue_id: vsi relative queue index
  * @info: config. info
  *
  * configure tx queue
  **/
-static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
                                    u16 vsi_queue_id,
                                    struct i40e_virtchnl_txq_info *info)
 {
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hmc_obj_txq tx_ctx;
+       struct i40e_vsi *vsi;
        u16 pf_queue_id;
        u32 qtx_ctl;
        int ret = 0;
 
-       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+       vsi = i40e_find_vsi_from_id(pf, vsi_id);
 
        /* clear the context structure first */
        memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
@@ -246,10 +384,10 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
        /* only set the required fields */
        tx_ctx.base = info->dma_ring_addr / 128;
        tx_ctx.qlen = info->ring_len;
-       tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
+       tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
        tx_ctx.rdylist_act = 0;
        tx_ctx.head_wb_ena = info->headwb_enabled;
-       tx_ctx.head_wb_addr = info->dma_headwb_addr; 
+       tx_ctx.head_wb_addr = info->dma_headwb_addr;
 
        /* clear the context in the HMC */
        ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -288,13 +426,13 @@ error_context:
 /**
  * i40e_config_vsi_rx_queue
  * @vf: pointer to the VF info
- * @vsi_idx: index of VSI in PF struct
+ * @vsi_id: id of VSI  as provided by the FW
  * @vsi_queue_id: vsi relative queue index
  * @info: config. info
  *
  * configure rx queue
  **/
-static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
                                    u16 vsi_queue_id,
                                    struct i40e_virtchnl_rxq_info *info)
 {
@@ -304,7 +442,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
        u16 pf_queue_id;
        int ret = 0;
 
-       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 
        /* clear the context structure first */
        memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
@@ -401,7 +539,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
        }
        if (type == I40E_VSI_SRIOV) {
                u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-               vf->lan_vsi_index = vsi->idx;
+
+               vf->lan_vsi_idx = vsi->idx;
                vf->lan_vsi_id = vsi->id;
                /* If the port VLAN has been configured and then the
                 * VF driver was removed then the VSI port VLAN
@@ -411,20 +550,25 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                 */
                if (vf->port_vlan_id)
                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
+
+               spin_lock_bh(&vsi->mac_filter_list_lock);
                f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-                                   vf->port_vlan_id, true, false);
+                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
+                                   true, false);
                if (!f)
                        dev_info(&pf->pdev->dev,
                                 "Could not allocate VF MAC addr\n");
-               f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+               f = i40e_add_filter(vsi, brdcast,
+                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
                                    true, false);
                if (!f)
                        dev_info(&pf->pdev->dev,
                                 "Could not allocate VF broadcast filter\n");
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
        }
 
        /* program mac filter */
-       ret = i40e_sync_vsi_filters(vsi);
+       ret = i40e_sync_vsi_filters(vsi, false);
        if (ret)
                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 
@@ -466,8 +610,9 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
 
        /* map PF queues to VF queues */
-       for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) {
-               u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+       for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
+               u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
+
                reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
                wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
                total_queue_pairs++;
@@ -475,13 +620,13 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 
        /* map PF queues to VSI */
        for (j = 0; j < 7; j++) {
-               if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) {
+               if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
                        reg = 0x07FF07FF;       /* unused */
                } else {
-                       u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+                       u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
                                                          j * 2);
                        reg = qid;
-                       qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+                       qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
                                                      (j * 2) + 1);
                        reg |= qid << 16;
                }
@@ -525,9 +670,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
        int i, msix_vf;
 
        /* free vsi & disconnect it from the parent uplink */
-       if (vf->lan_vsi_index) {
-               i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
-               vf->lan_vsi_index = 0;
+       if (vf->lan_vsi_idx) {
+               i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+               vf->lan_vsi_idx = 0;
                vf->lan_vsi_id = 0;
        }
        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -564,6 +709,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
         */
        vf->num_queue_pairs = 0;
        vf->vf_states = 0;
+       clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 }
 
 /**
@@ -582,7 +728,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
        ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
        if (ret)
                goto error_alloc;
-       total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
+       total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
        set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 
        /* store the total qps number for the runtime
@@ -682,9 +828,13 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
                }
        }
 
+       if (flr)
+               usleep_range(10000, 20000);
+
        if (!rsd)
                dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
                        vf->vf_id);
+       usleep_range(10000, 20000);
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
        /* clear the reset bit in the VPGEN_VFRTRIG reg */
        reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
@@ -692,59 +842,24 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 
        /* On initial reset, we won't have any queues */
-       if (vf->lan_vsi_index == 0)
+       if (vf->lan_vsi_idx == 0)
                goto complete_reset;
 
-       i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+       i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
 complete_reset:
        /* reallocate VF resources to reset the VSI state */
        i40e_free_vf_res(vf);
-       i40e_alloc_vf_res(vf);
-       i40e_enable_vf_mappings(vf);
-       set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
-
+       if (!i40e_alloc_vf_res(vf)) {
+               i40e_enable_vf_mappings(vf);
+               set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+               clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+       }
        /* tell the VF the reset is done */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
        i40e_flush(hw);
        clear_bit(__I40E_VF_DISABLE, &pf->state);
 }
 
-#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)
-/**
- * i40e_vfs_are_assigned
- * @pf: pointer to the PF structure
- *
- * Determine if any VFs are assigned to VMs
- **/
-static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
-{
-       struct pci_dev *pdev = pf->pdev;
-       struct pci_dev *vfdev;
-
-       /* loop through all the VFs to see if we own any that are assigned */
-       vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF , NULL);
-       while (vfdev) {
-               /* if we don't own it we don't care */
-               if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
-                       /* if it is assigned we cannot release it */
-                       if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
-                               return true;
-               }
-
-               vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
-                                      I40E_DEV_ID_VF,
-                                      vfdev);
-       }
-
-       return false;
-}
-#else
-static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
-{
-       return false;
-}
-#endif /* HAVE_SRIOV_CONFIGURE */
-
 /**
  * i40e_free_vfs
  * @pf: pointer to the PF structure
@@ -762,12 +877,19 @@ void i40e_free_vfs(struct i40e_pf *pf)
        while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
                usleep_range(1000, 2000);
 
+       for (i = 0; i < pf->num_alloc_vfs; i++)
+               if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+                       i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+                                              false);
+
        /* Disable IOV before freeing resources. This lets any VF drivers
         * running in the host get themselves cleaned up before we yank
         * the carpet out from underneath their feet.
         */
-       if (!i40e_vfs_are_assigned(pf))
+       if (!pci_vfs_assigned(pf->pdev))
                pci_disable_sriov(pf->pdev);
+       else
+               dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
 
        msleep(20); /* let any messages in transit get finished up */
 
@@ -788,18 +910,15 @@ void i40e_free_vfs(struct i40e_pf *pf)
         * assigned. Setting the number of VFs to 0 through sysfs is caught
         * before this function ever gets called.
         */
-       if (!i40e_vfs_are_assigned(pf)) {
+       if (!pci_vfs_assigned(pf->pdev)) {
                /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
                 * work correctly when SR-IOV gets re-enabled.
                 */
                for (vf_id = 0; vf_id < tmp; vf_id++) {
                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
-                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
                }
-       } else {
-               dev_warn(&pf->pdev->dev,
-                        "unable to disable SR-IOV because VFs are assigned.\n");
        }
        clear_bit(__I40E_VF_DISABLE, &pf->state);
 }
@@ -824,13 +943,11 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
        if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
                ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
                if (ret) {
-                       dev_err(&pf->pdev->dev,
-                               "Failed to enable SR-IOV, error %d.\n", ret);
+                       pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
                        pf->num_alloc_vfs = 0;
                        goto err_iov;
                }
        }
-
        /* allocate memory */
        vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
        if (!vfs) {
@@ -853,8 +970,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
                /* VF resources get allocated during reset */
                i40e_reset_vf(&vfs[i], false);
 
-               /* enable VF vplan_qtable mappings */
-               i40e_enable_vf_mappings(&vfs[i]);
        }
        pf->num_alloc_vfs = num_alloc_vfs;
 
@@ -883,17 +998,26 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
        int pre_existing_vfs = pci_num_vf(pdev);
        int err = 0;
 
-       dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
+       if (test_bit(__I40E_TESTING, &pf->state)) {
+               dev_warn(&pdev->dev,
+                        "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+               err = -EPERM;
+               goto err_out;
+       }
+
        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
                i40e_free_vfs(pf);
        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
                goto out;
 
        if (num_vfs > pf->num_req_vfs) {
+               dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
+                        num_vfs, pf->num_req_vfs);
                err = -EPERM;
                goto err_out;
        }
 
+       dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
        err = i40e_alloc_vfs(pf, num_vfs);
        if (err) {
                dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
@@ -925,14 +1049,14 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
                if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
                        pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
                        i40e_do_reset_safe(pf,
-                                          (1 << __I40E_PF_RESET_REQUESTED));
+                                          BIT_ULL(__I40E_PF_RESET_REQUESTED));
                }
                return i40e_pci_sriov_enable(pdev, num_vfs);
        }
-       if (!i40e_vfs_are_assigned(pf)) {
+       if (!pci_vfs_assigned(pdev)) {
                i40e_free_vfs(pf);
                pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-               i40e_do_reset_safe(pf, (u64)(1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
        } else {
                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
                return -EINVAL;
@@ -985,6 +1109,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
                }
        } else {
                vf->num_valid_msgs++;
+               /* reset the invalid counter, if a valid message is received. */
+               vf->num_invalid_msgs = 0;
        }
 
        aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,  v_opcode, v_retval,
@@ -1020,12 +1146,16 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
  *
  * called from the VF to request the API version used by the PF
  **/
-static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
 {
        struct i40e_virtchnl_version_info info = {
                I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
        };
 
+       vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+       /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+       if (VF_IS_V10(vf))
+               info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
                                      I40E_SUCCESS, (u8 *)&info,
                                      sizeof(struct
@@ -1040,7 +1170,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf)
  *
  * called from the VF to request its resources
  **/
-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 {
        struct i40e_virtchnl_vf_resource *vfres = NULL;
        struct i40e_pf *pf = vf->pf;
@@ -1064,22 +1194,34 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
                len = 0;
                goto err;
        }
+       if (VF_IS_V11(vf))
+               vf->driver_caps = *(u32 *)msg;
+       else
+               vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+                                 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+                                 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
 
        vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi->info.pvid)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+
+       if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
 
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
-       if (vf->lan_vsi_index) {
-               vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
+       if (vf->lan_vsi_idx) {
+               vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
                vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
-               vfres->vsi_res[i].num_queue_pairs =
-                   pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
-               memcpy(vfres->vsi_res[i].default_mac_addr,
-                      vf->default_lan_addr.addr, ETH_ALEN);
+               vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+               /* VFs only use TC 0 */
+               vfres->vsi_res[i].qset_handle
+                                         = LE16_TO_CPU(vsi->info.qs_handle[0]);
+               ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+                               vf->default_lan_addr.addr);
                i++;
        }
        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
@@ -1125,18 +1267,18 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
            (struct i40e_virtchnl_promisc_info *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
-       struct i40e_vsi *vsi;
        bool allmulti = false;
+       struct i40e_vsi *vsi;
        i40e_status aq_ret;
 
+       vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
            !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
-           (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
+           (vsi->type != I40E_VSI_FCOE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
-       vsi = pf->vsi[info->vsi_id];
        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
                allmulti = true;
        aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
@@ -1199,7 +1341,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        }
 
        /* set vsi num_queue_pairs in use to num configured by VF */
-       pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs;
+       pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
 
 error_param:
        /* send the response to the VF */
@@ -1300,7 +1442,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
-       if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+
+       if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
                aq_ret = I40E_ERR_TIMEOUT;
 error_param:
        /* send the response to the VF */
@@ -1322,7 +1465,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        struct i40e_virtchnl_queue_select *vqs =
            (struct i40e_virtchnl_queue_select *)msg;
        struct i40e_pf *pf = vf->pf;
-       u16 vsi_id = vqs->vsi_id;
        i40e_status aq_ret = 0;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
@@ -1339,7 +1481,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
-       if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+
+       if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
                aq_ret = I40E_ERR_TIMEOUT;
 
 error_param:
@@ -1377,7 +1520,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                goto error_param;
        }
 
-       vsi = pf->vsi[vqs->vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1455,7 +1598,12 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                if (ret)
                        goto error_param;
        }
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
+
+       /* Lock once, because all function inside for loop accesses VSI's
+        * MAC filter list which needs to be protected using same lock.
+        */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
 
        /* add new addresses to the list */
        for (i = 0; i < al->num_elements; i++) {
@@ -1475,13 +1623,17 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        dev_err(&pf->pdev->dev,
                                "Unable to add VF MAC filter\n");
                        ret = I40E_ERR_PARAM;
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
                        goto error_param;
                }
        }
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* program the updated filter list */
-       if (i40e_sync_vsi_filters(vsi))
-               dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
+       ret = i40e_sync_vsi_filters(vsi, false);
+       if (ret)
+               dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+                       vf->vf_id, ret);
 
 error_param:
        /* send the response to the VF */
@@ -1523,16 +1675,20 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        goto error_param;
                }
        }
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
 
+       spin_lock_bh(&vsi->mac_filter_list_lock);
        /* delete addresses from the list */
        for (i = 0; i < al->num_elements; i++)
                i40e_del_filter(vsi, al->list[i].addr,
                                I40E_VLAN_ANY, true, false);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* program the updated filter list */
-       if (i40e_sync_vsi_filters(vsi))
-               dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
+       ret = i40e_sync_vsi_filters(vsi, false);
+       if (ret)
+               dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+                       vf->vf_id, ret);
 
 error_param:
        /* send the response to the VF */
@@ -1573,7 +1729,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                        goto error_param;
                }
        }
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (vsi->info.pvid) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1583,6 +1739,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        for (i = 0; i < vfl->num_elements; i++) {
                /* add new VLAN filter */
                int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+
                if (ret)
                        dev_err(&pf->pdev->dev,
                                "Unable to add VF vlan filter %d, error %d\n",
@@ -1626,7 +1783,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                }
        }
 
-       vsi = pf->vsi[vsi_id];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (vsi->info.pvid) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -1634,6 +1791,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 
        for (i = 0; i < vfl->num_elements; i++) {
                int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+
                if (ret)
                        dev_err(&pf->pdev->dev,
                                "Unable to delete VF vlan filter %d, error %d\n",
@@ -1670,9 +1828,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
                valid_len = sizeof(struct i40e_virtchnl_version_info);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
-       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
                valid_len = 0;
                break;
+       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+               if (VF_IS_V11(vf))
+                       valid_len = sizeof(u32);
+               else
+                       valid_len = 0;
+               break;
        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
                valid_len = sizeof(struct i40e_virtchnl_txq_info);
                break;
@@ -1740,7 +1903,6 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
        case I40E_VIRTCHNL_OP_UNKNOWN:
        default:
                return -EPERM;
-               break;
        }
        /* few more checks */
        if ((valid_len != msglen) || (err_msg_format)) {
@@ -1785,10 +1947,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 
        switch (v_opcode) {
        case I40E_VIRTCHNL_OP_VERSION:
-               ret = i40e_vc_get_version_msg(vf);
+               ret = i40e_vc_get_version_msg(vf, msg);
                break;
        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
-               ret = i40e_vc_get_vf_resources_msg(vf);
+               ret = i40e_vc_get_vf_resources_msg(vf, msg);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
                i40e_vc_reset_vf_msg(vf);
@@ -1805,6 +1967,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
                break;
        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
                ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+               i40e_vc_notify_vf_link_state(vf);
                break;
        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
                ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
@@ -1865,9 +2028,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
                /* read GLGEN_VFLRSTAT register to find out the flr vfs */
                vf = &pf->vf[vf_id];
                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
-               if (reg & (1 << bit_idx)) {
+               if (reg & BIT(bit_idx)) {
                        /* clear the bit in GLGEN_VFLRSTAT */
-                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 
                        if (!test_bit(__I40E_DOWN, &pf->state))
                                i40e_reset_vf(vf, true);
@@ -1877,121 +2040,6 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
        return 0;
 }
 
-/**
- * i40e_vc_vf_broadcast
- * @pf: pointer to the PF structure
- * @opcode: operation code
- * @retval: return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send a message to all VFs on a given PF
- **/
-static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
-                                enum i40e_virtchnl_ops v_opcode,
-                                i40e_status v_retval, u8 *msg,
-                                u16 msglen)
-{
-       struct i40e_hw *hw = &pf->hw;
-       struct i40e_vf *vf = pf->vf;
-       int i;
-
-       for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
-               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-               /* Not all vfs are enabled so skip the ones that are not */
-               if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
-                   !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
-                       continue;
-
-               /* Ignore return value on purpose - a given VF may fail, but
-                * we need to keep going and send to all of them
-                */
-               i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
-                                      msg, msglen, NULL);
-       }
-}
-
-/**
- * i40e_vc_notify_link_state
- * @pf: pointer to the PF structure
- *
- * send a link status message to all VFs on a given PF
- **/
-void i40e_vc_notify_link_state(struct i40e_pf *pf)
-{
-       struct i40e_virtchnl_pf_event pfe;
-       struct i40e_hw *hw = &pf->hw;
-       struct i40e_vf *vf = pf->vf;
-       struct i40e_link_status *ls = &pf->hw.phy.link_info;
-       int i;
-
-       pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
-       pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
-       for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
-               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-#ifdef HAVE_NDO_SET_VF_LINK_STATE
-               if (vf->link_forced) {
-                       pfe.event_data.link_event.link_status = vf->link_up;
-                       pfe.event_data.link_event.link_speed =
-                               (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
-               } else {
-#endif
-                       pfe.event_data.link_event.link_status =
-                               ls->link_info & I40E_AQ_LINK_UP;
-                       pfe.event_data.link_event.link_speed = ls->link_speed;
-#ifdef HAVE_NDO_SET_VF_LINK_STATE
-               }
-#endif
-               i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
-                                      I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe),
-                                      NULL);
-       }
-}
-
-/**
- * i40e_vc_notify_reset
- * @pf: pointer to the PF structure
- *
- * indicate a pending reset to all VFs on a given PF
- **/
-void i40e_vc_notify_reset(struct i40e_pf *pf)
-{
-       struct i40e_virtchnl_pf_event pfe;
-
-       pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
-       pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
-       i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
-                            (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
-}
-
-/**
- * i40e_vc_notify_vf_reset
- * @vf: pointer to the VF structure
- *
- * indicate a pending reset to the given VF
- **/
-void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
-{
-       struct i40e_virtchnl_pf_event pfe;
-       int abs_vf_id;
-
-       /* validate the request */
-       if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
-               return;
-
-       /* verify if the VF is in either init or active before proceeding */
-       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
-           !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
-               return;
-
-       abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
-
-       pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
-       pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
-       i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
-                              I40E_SUCCESS, (u8 *)&pfe,
-                              sizeof(struct i40e_virtchnl_pf_event), NULL);
-}
 #ifdef IFLA_VF_MAX
 
 /**
@@ -2020,7 +2068,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        }
 
        vf = &(pf->vf[vf_id]);
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev,
                        "Uninitialized VF %d\n", vf_id);
@@ -2035,8 +2083,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
                goto error_param;
        }
 
+       /* Lock once because below invoked function add/del_filter requires
+        * mac_filter_list_lock to be held
+        */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
        /* delete the temporary mac address */
-       i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+       i40e_del_filter(vsi, vf->default_lan_addr.addr,
+                       vf->port_vlan_id ? vf->port_vlan_id : -1,
                        true, false);
 
        /* Delete all the filters for this VSI - we're going to kill it
@@ -2045,9 +2099,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        list_for_each_entry(f, &vsi->mac_filter_list, list)
                i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
 
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
        dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
        /* program mac filter */
-       if (i40e_sync_vsi_filters(vsi)) {
+       if (i40e_sync_vsi_filters(vsi, false)) {
                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
                ret = -EIO;
                goto error_param;
@@ -2074,8 +2130,10 @@ error_param:
 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                              int vf_id, u16 vlan_id, u8 qos)
 {
+       u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
+       bool is_vsi_in_vlan = false;
        struct i40e_vsi *vsi;
        struct i40e_vf *vf;
        int ret = 0;
@@ -2094,14 +2152,22 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
        }
 
        vf = &(pf->vf[vf_id]);
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
                ret = -EINVAL;
                goto error_pvid;
        }
 
-       if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
+       if (le16_to_cpu(vsi->info.pvid) == vlanprio)
+               /* duplicate request, so just return success */
+               goto error_pvid;
+
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+       is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
                dev_err(&pf->pdev->dev,
                        "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
                        vf_id);
@@ -2121,7 +2187,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
         * MAC addresses deleted.
         */
        if ((!(vlan_id || qos) ||
-           (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+           vlanprio != le16_to_cpu(vsi->info.pvid)) &&
            vsi->info.pvid)
                ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
 
@@ -2136,8 +2202,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                }
        }
        if (vlan_id || qos)
-               ret = i40e_vsi_add_pvid(vsi,
-                               vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
+               ret = i40e_vsi_add_pvid(vsi, vlanprio);
        else
                i40e_vsi_remove_pvid(vsi);
 
@@ -2205,7 +2270,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int max_tx_rate)
        }
 
        vf = &(pf->vf[vf_id]);
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
                ret = -EINVAL;
@@ -2292,7 +2357,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 
        vf = &(pf->vf[vf_id]);
        /* first vsi is always the LAN vsi */
-       vsi = pf->vsi[vf->lan_vsi_index];
+       vsi = pf->vsi[vf->lan_vsi_idx];
        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
                ret = -EINVAL;
@@ -2301,7 +2366,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 
        ivi->vf = vf_id;
 
-       memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
+       ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
 
 #ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
        ivi->max_tx_rate = vf->tx_rate;
@@ -2427,7 +2492,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
 
        vf->spoofchk = enable;
        memset(&ctxt, 0, sizeof(ctxt));
-       ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
+       ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
        ctxt.pf_num = pf->hw.pf_id;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
        if (enable)
similarity index 92%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/i40e_virtchnl_pf.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.h
index f49a39e9037f0f7cba859a8822c8e24aa2fe3f0c..85a5131aae72d8e866e4333317e52fecf4b56101 100644 (file)
@@ -29,8 +29,6 @@
 
 #include "i40e.h"
 
-#define I40E_MAX_MACVLAN_FILTERS 256
-#define I40E_MAX_VLAN_FILTERS 256
 #define I40E_MAX_VLANID 4095
 
 #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
@@ -42,6 +40,9 @@
 #define I40E_VLAN_MASK                 0xFFF
 #define I40E_PRIORITY_MASK             0x7000
 
+#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
+#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
+
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
        I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -78,6 +79,8 @@ struct i40e_vf {
        u16 vf_id;
        /* all VF vsis connect to the same parent */
        enum i40e_switch_element_types parent_type;
+       struct i40e_virtchnl_version_info vf_ver;
+       u32 driver_caps; /* reported by VF driver */
 
        /* VF Port Extender (PE) stag if used */
        u16 stag;
@@ -91,7 +94,7 @@ struct i40e_vf {
         * When assigned, these will be non-zero, because VSI 0 is always
         * the main LAN VSI for the PF.
         */
-       u8 lan_vsi_index;       /* index into PF struct */
+       u8 lan_vsi_idx;         /* index into PF struct */
        u8 lan_vsi_id;          /* ID as used by firmware */
 #ifdef I40E_FCOE
        u8 fcoe_vsi_index;
@@ -100,7 +103,8 @@ struct i40e_vf {
 
        u8 num_queue_pairs;     /* num of qps assigned to VF vsis */
        u64 num_mdd_events;     /* num of mdd events detected */
-       u64 num_invalid_msgs;   /* num of malformed or invalid msgs detected */
+       /* num of continuous malformed or invalid msgs detected */
+       u64 num_invalid_msgs;
        u64 num_valid_msgs;     /* num of valid msgs detected */
 
        unsigned long vf_caps;  /* vf's adv. capabilities */
similarity index 97%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/kcompat.c
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/kcompat.c
index 079dfa5ee7af957dbdbd4cb7105e5b9a84c8474a..f51540d5a65d044c0b3ef358e921d4ccb318ca96 100644 (file)
@@ -938,8 +938,6 @@ void _kc_print_hex_dump(const char *level,
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
 #ifdef NAPI
-#if defined(DRIVER_IXGBE) || defined(DRIVER_IGB) || defined(DRIVER_I40E) || \
-       defined(DRIVER_IXGBEVF)
 struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
 {
        struct adapter_q_vector *q_vector = container_of(napi,
@@ -947,20 +945,13 @@ struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
                                                        napi);
        return &q_vector->poll_dev;
 }
-#endif
 
 int __kc_adapter_clean(struct net_device *netdev, int *budget)
 {
        int work_done;
        int work_to_do = min(*budget, netdev->quota);
-#if defined(DRIVER_IXGBE) || defined(DRIVER_IGB) || defined(DRIVER_I40E) || \
-       defined(E1000E_MQ) || defined(DRIVER_IXGBEVF)
        /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
        struct napi_struct *napi = netdev->priv;
-#else
-       struct adapter_struct *adapter = netdev_priv(netdev);
-       struct napi_struct *napi = &adapter->rx_ring[0].napi;
-#endif
        work_done = napi->poll(napi, work_to_do);
        *budget -= work_done;
        netdev->quota -= work_done;
@@ -1024,21 +1015,19 @@ void _kc_netif_tx_start_all_queues(struct net_device *netdev)
 }
 #endif /* HAVE_TX_MQ */
 
-#ifndef __WARN_printf
 void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
 {
        va_list args;
 
        printk(KERN_WARNING "------------[ cut here ]------------\n");
-       printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, line);
+       printk(KERN_WARNING "WARNING: at %s:%d \n", file, line);
        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);
 
        dump_stack();
 }
-#endif /* __WARN_printf */
-#endif /* < 2.6.27 */
+#endif /* __VMKLNX__ */
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
@@ -1717,6 +1706,62 @@ int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev)
 #endif /* CONFIG_PCI_IOV */
 #endif /* 3.10.0 */
 
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+const unsigned char pcie_link_speed[] = {
+       PCI_SPEED_UNKNOWN,      /* 0 */
+       PCIE_SPEED_2_5GT,       /* 1 */
+       PCIE_SPEED_5_0GT,       /* 2 */
+       PCIE_SPEED_8_0GT,       /* 3 */
+       PCI_SPEED_UNKNOWN,      /* 4 */
+       PCI_SPEED_UNKNOWN,      /* 5 */
+       PCI_SPEED_UNKNOWN,      /* 6 */
+       PCI_SPEED_UNKNOWN,      /* 7 */
+       PCI_SPEED_UNKNOWN,      /* 8 */
+       PCI_SPEED_UNKNOWN,      /* 9 */
+       PCI_SPEED_UNKNOWN,      /* A */
+       PCI_SPEED_UNKNOWN,      /* B */
+       PCI_SPEED_UNKNOWN,      /* C */
+       PCI_SPEED_UNKNOWN,      /* D */
+       PCI_SPEED_UNKNOWN,      /* E */
+       PCI_SPEED_UNKNOWN       /* F */
+};
+
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+                              enum pcie_link_width *width)
+{
+       int ret;
+
+       *speed = PCI_SPEED_UNKNOWN;
+       *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+       while (dev) {
+               u16 lnksta;
+               enum pci_bus_speed next_speed;
+               enum pcie_link_width next_width;
+
+               ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+               if (ret)
+                       return ret;
+
+               next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+               next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+                       PCI_EXP_LNKSTA_NLW_SHIFT;
+
+               if (next_speed < *speed)
+                       *speed = next_speed;
+
+               if (next_width < *width)
+                       *width = next_width;
+
+               dev = dev->bus->self;
+       }
+
+       return 0;
+}
+
+#endif
+
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
 int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask)
 {
similarity index 95%
rename from i40e-dkms-1.2.48/i40e-1.2.48/src/i40e/kcompat.h
rename to i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/kcompat.h
index 34a84bc18fd4ac3b48a62906908b90c3e2812a33..1d43af658e62f2a593cf2b2aed76a0e449b93190 100644 (file)
@@ -296,14 +296,6 @@ struct msix_entry {
 #define VLAN_ETH_FRAME_LEN 1518
 #endif
 
-#if !defined(IXGBE_DCA) && !defined(IGB_DCA)
-#define dca_get_tag(b) 0
-#define dca_add_requester(a) -1
-#define dca_remove_requester(b) do { } while(0)
-#define DCA_PROVIDER_ADD     0x0001
-#define DCA_PROVIDER_REMOVE  0x0002
-#endif
-
 #ifndef DCA_GET_TAG_TWO_ARGS
 #define dca3_get_tag(a,b) dca_get_tag(b)
 #endif
@@ -701,6 +693,13 @@ struct _kc_ethtool_pauseparam {
 #define ETHTOOL_BUSINFO_LEN    32
 #endif
 
+#ifndef SPEED_2500
+#define SPEED_2500 2500
+#endif
+#ifndef SPEED_5000
+#define SPEED_5000 5000
+#endif
+
 #ifndef RHEL_RELEASE_VERSION
 #define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
 #endif
@@ -797,13 +796,16 @@ struct _kc_ethtool_pauseparam {
 #elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)))
 /* SLES11 SP4 is 3.0.101 based */
 #define SLE_VERSION_CODE SLE_VERSION(11,4,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)))
+/* SLES12 GA is 3.12.28 based */
+#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
 /* new SLES kernels must be added here with >= based on kernel
  * the idea is to order from newest to oldest and just catch all
  * of them using the >=
  */
-#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)))
-/* SLES12 GA is 3.12.y based */
-#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,47)))
+/* SLES12 SP1 is 3.12.47-based */
+#define SLE_VERSION_CODE SLE_VERSION(12,1,0)
 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
 #endif /* CONFIG_SUSE_KERNEL */
 #ifndef SLE_VERSION_CODE
@@ -2009,6 +2011,23 @@ static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
 
+/* other values will be created as #defines later */
+enum pci_bus_speed {
+       PCI_SPEED_UNKNOWN = 0xff,
+};
+
+enum pcie_link_width {
+       PCIE_LNK_WIDTH_RESRV    = 0x00,
+       PCIE_LNK_X1             = 0x01,
+       PCIE_LNK_X2             = 0x02,
+       PCIE_LNK_X4             = 0x04,
+       PCIE_LNK_X8             = 0x08,
+       PCIE_LNK_X12            = 0x0C,
+       PCIE_LNK_X16            = 0x10,
+       PCIE_LNK_X32            = 0x20,
+       PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
+};
+
 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
 #define i_private u.generic_ip
 #endif /* >= RHEL 5.0 */
@@ -2082,6 +2101,7 @@ extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
 #else /* 2.6.19 */
 #include <linux/aer.h>
 #include <linux/string.h>
+#include <linux/pci_hotplug.h>
 #endif /* < 2.6.19 */
 
 /*****************************************************************************/
@@ -2249,6 +2269,10 @@ extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
 #define SUPPORTED_2500baseX_Full (1 << 15)
 #endif
 
+#ifndef ETH_P_PAUSE
+#define ETH_P_PAUSE 0x8808
+#endif
+
 #else /* 2.6.22 */
 #define ETH_TYPE_TRANS_SETS_DEV
 #define HAVE_NETDEV_STATS_IN_NETDEV
@@ -2423,6 +2447,7 @@ static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#define INCLUDE_PM_QOS_PARAMS_H
 #include <linux/pm_qos_params.h>
 #else /* >= 3.2.0 */
 #include <linux/pm_qos.h>
@@ -2848,10 +2873,16 @@ static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev)
 #endif
 #else /* < 2.6.32 */
 #if (RHEL_RELEASE_CODE && \
-     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
      (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
 #define HAVE_RHEL6_NET_DEVICE_EXTENDED
-#endif /* RHEL >= 6.1 && RHEL < 7.0 */
+#endif /* RHEL >= 6.2 && RHEL < 7.0 */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT
+#define HAVE_NDO_SET_FEATURES
+#endif /* RHEL >= 6.6 && RHEL < 7.0 */
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 #ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
 #define HAVE_NETDEV_OPS_FCOE_ENABLE
@@ -2943,6 +2974,7 @@ static inline bool pci_is_pcie(struct pci_dev *dev)
 #if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5))
 #define HAVE_ETHTOOL_GSRSSH
 #define HAVE_RHEL6_SRIOV_CONFIGURE
+#define HAVE_RXFH_NONCONST
 #endif /* RHEL > 6.5 */
 #endif /* RHEL >= 6.4 && RHEL < 7.0 */
 
@@ -3149,6 +3181,10 @@ static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev)
 #endif
 #endif /* 2.6.0 => 2.6.34 */
 
+#define PCIE_SPEED_2_5GT 0x14
+#define PCIE_SPEED_5_0GT 0x15
+#define PCIE_SPEED_8_0GT 0x16
+
 #else /* < 2.6.34 */
 #define HAVE_SYSTEM_SLEEP_PM_OPS
 #ifndef HAVE_SET_RX_MODE
@@ -3163,6 +3199,10 @@ ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
                                   const void __user *from, size_t count);
 #define simple_write_to_buffer _kc_simple_write_to_buffer
 
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define PCI_EXP_LNKSTA_NLW_SHIFT 4
+#endif
+
 #ifndef numa_node_id
 #define numa_node_id() 0
 #endif
@@ -3172,9 +3212,6 @@ ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
 #ifdef HAVE_TX_MQ
 #include <net/sch_generic.h>
 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
-#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
-#define kstrtoul(a, b, c)  ((*(c)) = simple_strtoul((a), &(a), (b)))
-#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */
 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
 void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
 #define netif_set_real_num_tx_queues  _kc_netif_set_real_num_tx_queues
@@ -3263,6 +3300,15 @@ do {                                                             \
 #define HAVE_8021P_SUPPORT
 #endif
 
+/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0)))
+static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb)
+{
+       return;
+}
+#endif
+
 #else /* < 2.6.36 */
 
 #define HAVE_PM_QOS_REQUEST_ACTIVE
@@ -3409,6 +3455,9 @@ static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
 #define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
 #endif
 #endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
+#define kstrtoul(a, b, c)  ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */
 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
 extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16);
 #define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q))
@@ -3542,7 +3591,7 @@ struct _kc_ethtool_rx_flow_spec {
 #endif /* kfree_rcu */
 #ifndef kstrtol_from_user
 #define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r)
-static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, 
+static inline int _kc_kstrtol_from_user(const char __user *s, size_t count,
                                        unsigned int base, long *res)
 {
        /* sign, base 2 representation, newline, terminator */
@@ -3680,6 +3729,7 @@ static inline void __kc_skb_frag_unref(skb_frag_t *frag)
 #define HAVE_SKB_L4_RXHASH
 #endif
 #define HAVE_IOMMU_PRESENT
+#define HAVE_PM_QOS_REQUEST_LIST_NEW
 #endif /* < 3.2.0 */
 
 #if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
@@ -3756,7 +3806,6 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *,
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
-#define skb_tx_timestamp(skb) do {} while (0)
 
 #ifndef ether_addr_equal
 static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
@@ -3794,6 +3843,10 @@ static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
 #define MDIO_EEE_10GKR         0x0040  /* 10G KR EEE cap */
 #endif
 
+#ifndef __GFP_MEMALLOC
+#define __GFP_MEMALLOC 0
+#endif
+
 #ifndef eth_random_addr
 #define eth_random_addr _kc_eth_random_addr
 static inline void _kc_eth_random_addr(u8 *addr)
@@ -4037,6 +4090,8 @@ static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
 #define HAVE_ENCAP_CSUM_OFFLOAD
 #endif
 
+#define HAVE_GRE_ENCAP_OFFLOAD
+
 #ifndef HAVE_SRIOV_CONFIGURE
 #define HAVE_SRIOV_CONFIGURE
 #endif
@@ -4159,10 +4214,17 @@ extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) )
 #else /* >= 3.11.0 */
 #define HAVE_NDO_SET_VF_LINK_STATE
+#define HAVE_SKB_INNER_PROTOCOL
 #endif /* >= 3.11.0 */
 
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+extern int __kc_pcie_get_minimum_link(struct pci_dev *dev,
+                                     enum pci_bus_speed *speed,
+                                     enum pcie_link_width *width);
+#ifndef pcie_get_minimum_link
+#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w)
+#endif
 #else /* >= 3.12.0 */
 #if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))
 #define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
@@ -4178,6 +4240,15 @@ extern int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask);
 #ifndef u64_stats_init
 #define u64_stats_init(a) do { } while(0)
 #endif
+#ifndef BIT_ULL
+#define BIT_ULL(n) (1ULL << (n))
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#endif
+
 #else /* >= 3.13.0 */
 #define HAVE_VXLAN_CHECKS
 #if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24))
@@ -4224,6 +4295,10 @@ static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
 
 #else
 
+#ifndef HAVE_ENCAP_TSO_OFFLOAD
+#define HAVE_ENCAP_TSO_OFFLOAD
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+
 #ifndef HAVE_VXLAN_RX_OFFLOAD
 #define HAVE_VXLAN_RX_OFFLOAD
 #endif /* HAVE_VXLAN_RX_OFFLOAD */
@@ -4270,7 +4345,8 @@ static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src)
 /*****************************************************************************/
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) )
 
-#if (!( UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30)))
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \
+     !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30)))
 #define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh
 #define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh
 #endif
@@ -4348,7 +4424,7 @@ static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev,
 #else
        return 0;
 #endif
-       
+
 }
 #define __dev_mc_sync __kc_dev_mc_sync
 
@@ -4365,11 +4441,39 @@ static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev,
 }
 #define __dev_mc_unsync __kc_dev_mc_unsync
 #endif /* __dev_uc_sync */
+
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif
+
 #else
 #define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
 #endif /* 3.16.0 */
 
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
+#ifndef timespec64
+#define timespec64 timespec
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+       return ts;
+}
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+       return ts64;
+}
+#define timespec64_equal timespec_equal
+#define timespec64_compare timespec_compare
+#define set_normalized_timespec64 set_normalized_timespec
+#define timespec64_add_safe timespec_add_safe
+#define timespec64_add timespec_add
+#define timespec64_sub timespec_sub
+#define timespec64_valid timespec_valid
+#define timespec64_valid_strict timespec_valid_strict
+#define timespec64_to_ns timespec_to_ns
+#define ns_to_timespec64 ns_to_timespec
+#define ktime_to_timespec64 ktime_to_timespec
+#define timespec64_add_ns timespec_add_ns
+#endif /* timespec64 */
 #define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
 #else
 #define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
@@ -4392,6 +4496,7 @@ extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_l
 #else /*  3.18.0 */
 #define HAVE_SKBUFF_CSUM_LEVEL
 #define HAVE_SKB_XMIT_MORE
+#define HAVE_SKB_INNER_PROTOCOL_TYPE
 #endif /* 3.18.0 */
 
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) )
@@ -4402,24 +4507,128 @@ extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_l
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
 /* netdev_phys_port_id renamed to netdev_phys_item_id */
 #define netdev_phys_item_id netdev_phys_port_id
+
+#ifndef NETDEV_RSS_KEY_LEN
 #define NETDEV_RSS_KEY_LEN (13 * 4)
+#endif
+#if ( !(RHEL_RELEASE_CODE && \
+       (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && \
+       (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) )
 #define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len)
+#endif /* RHEL_RELEASE_CODE */
 extern void __kc_netdev_rss_key_fill(void *buffer, size_t len);
 #define SPEED_20000 20000
 #define SPEED_40000 40000
-#else
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+#ifndef dev_alloc_pages
+#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order))
+#endif
+#ifndef dev_alloc_page
+#define dev_alloc_page() dev_alloc_pages(0)
+#endif
+#if !defined(eth_skb_pad) && !defined(skb_put_padto)
+/**
+ *     __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size
+ *     @skb: buffer to pad
+ *     @len: minimal length
+ *
+ *     Pads up a buffer to ensure the trailing bytes exist and are
+ *     blanked. If the buffer already contains sufficient data it
+ *     is untouched. Otherwise it is extended. Returns zero on
+ *     success. The skb is freed on error.
+ */
+static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+       unsigned int size = skb->len;
+
+       if (unlikely(size < len)) {
+               len -= size;
+               if (skb_pad(skb, len))
+                       return -ENOMEM;
+               __skb_put(skb, len);
+       }
+       return 0;
+}
+#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len)
+
+static inline int __kc_eth_skb_pad(struct sk_buff *skb)
+{
+       return __kc_skb_put_padto(skb, ETH_ZLEN);
+}
+#define eth_skb_pad(skb) __kc_eth_skb_pad(skb)
+#endif /* eth_skb_pad && skb_put_padto */
+
+#ifndef napi_alloc_skb
+static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length)
+{
+       return netdev_alloc_skb_ip_align(napi->dev, length);
+}
+#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len)
+#endif /* napi_alloc_skb */
+#define HAVE_CONFIG_PM_RUNTIME
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define NDO_BRIDGE_GETLINK_HAS_FILTER_MASK_PARAM
+#define HAVE_RXFH_HASHFUNC
+#endif /* RHEL_RELEASE_CODE */
+#else /* 3.19.0 */
 #define HAVE_NDO_FDB_ADD_VID
-/* ethtool get/set_rxfh function got a new argument */
 #define HAVE_RXFH_HASHFUNC
 #endif /* 3.19.0 */
 
 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) )
+/* vlan_tx_xx functions got renamed to skb_vlan */
+#ifndef skb_vlan_tag_get
+#define skb_vlan_tag_get vlan_tx_tag_get
+#endif
+#ifndef skb_vlan_tag_present
+#define skb_vlan_tag_present vlan_tx_tag_present
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#endif
 #else
 #define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
-/* vlan_tx_xx functions got renamed to skb_vlan */
-#define vlan_tx_tag_get skb_vlan_tag_get
-#define vlan_tx_tag_present skb_vlan_tag_present
 #define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
 #endif /* 3.20.0 */
 
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) )
+#ifndef NO_PTP_SUPPORT
+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#include <linux/timecounter.h>
+#else
+#include <linux/clocksource.h>
+#endif
+static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+       tc->nsec += delta;
+}
+#define timecounter_adjtime __kc_timecounter_adjtime
+#endif
+#else
+#define HAVE_PTP_CLOCK_INFO_GETTIME64
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#endif /* 4,1,0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9))
+#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
+static inline bool page_is_pfmemalloc(struct page __maybe_unused *page)
+{
+#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC
+       return page->pfmemalloc;
+#else
+       return false;
+#endif
+}
+#endif /* !SLES12sp1 */
+#else
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* 4.1.9 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0))
+#else
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#endif /* 4.2.0 */
+
 #endif /* _KCOMPAT_H_ */