review.fuel-infra Code Review - packages/trusty/i40e-dkms.git/commitdiff
[Do not merge!] i40e-dkms package 76/16376/6
author Alexandr Nevenchannyy <anevenchannyy@mirantis.com>
Fri, 22 Jan 2016 12:15:49 +0000 (15:15 +0300)
committer Alexandr Nevenchannyy <anevenchannyy@mirantis.com>
Tue, 26 Jan 2016 12:20:08 +0000 (12:20 +0000)
This is the initial commit of the i40e-dkms-1.3.47 package for the Rackspace
Lab with Intel X710 Ethernet cards.

Change-Id: I842df20b5f0252b148ba1d7dae085cd2bb6c8077

60 files changed:
debian/README.Debian [new file with mode: 0755]
debian/changelog [new file with mode: 0755]
debian/compat [new file with mode: 0755]
debian/control [new file with mode: 0755]
debian/copyright [new file with mode: 0755]
debian/dirs [new file with mode: 0755]
debian/postinst [new file with mode: 0755]
debian/prerm [new file with mode: 0755]
debian/rules [new file with mode: 0755]
i40e-dkms-1.3.47/Makefile [new file with mode: 0644]
i40e-dkms-1.3.47/common.postinst [new file with mode: 0755]
i40e-dkms-1.3.47/i40e-1.3.47/COPYING [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/README [new file with mode: 0755]
i40e-dkms-1.3.47/i40e-1.3.47/SUMS [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/dkms.conf [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/i40e.7 [new file with mode: 0755]
i40e-dkms-1.3.47/i40e-1.3.47/i40e.spec [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/pci.updates [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/scripts/dump_tables [new file with mode: 0755]
i40e-dkms-1.3.47/i40e-1.3.47/scripts/set_irq_affinity [new file with mode: 0755]
i40e-dkms-1.3.47/i40e-1.3.47/src/Makefile [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/Kbuild [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/Module.supported [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq_cmd.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_alloc.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_common.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_configfs.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb_nl.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_debugfs.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_devids.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_diag.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_diag.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_ethtool.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_fcoe.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_fcoe.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_helper.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_hmc.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_hmc.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_lan_hmc.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_lan_hmc.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_main.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_nvm.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_osdep.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_prototype.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_ptp.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_register.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_status.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_txrx.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_txrx.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_type.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.h [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/kcompat.c [new file with mode: 0644]
i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/kcompat.h [new file with mode: 0644]

diff --git a/debian/README.Debian b/debian/README.Debian
new file mode 100755 (executable)
index 0000000..833af3d
--- /dev/null
@@ -0,0 +1,5 @@
+i40e DKMS module for Debian
+
+This package was automatically generated by the DKMS system,
+for distribution on Debian based operating systems.
+
diff --git a/debian/changelog b/debian/changelog
new file mode 100755 (executable)
index 0000000..7541b22
--- /dev/null
@@ -0,0 +1,6 @@
+i40e-dkms (1.3.47) i40e-dkms Divergent package; urgency=low
+
+  * Initial package build for Rackspace Intel Lab
+
+ -- Mirantis Openstack Scale Team <anevenchannyy@mirantis.com>  Fri, 22 Jan 2016 15:10:19 +0000
+
diff --git a/debian/compat b/debian/compat
new file mode 100755 (executable)
index 0000000..7f8f011
--- /dev/null
@@ -0,0 +1 @@
+7
diff --git a/debian/control b/debian/control
new file mode 100755 (executable)
index 0000000..ffb4473
--- /dev/null
@@ -0,0 +1,11 @@
+Source: i40e-dkms
+Section: misc
+Priority: optional
+Maintainer: Mirantis Openstack Scale Team <anevenchannyy@mirantis.com>
+Build-Depends: debhelper (>= 7), dkms
+Standards-Version: 3.8.1
+
+Package: i40e-dkms
+Architecture: all
+Depends: dkms (>= 1.95), ${misc:Depends}
+Description: i40e driver in DKMS format
diff --git a/debian/copyright b/debian/copyright
new file mode 100755 (executable)
index 0000000..92fcd2d
--- /dev/null
@@ -0,0 +1,31 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: i40e-dkms
+Source: http://sourceforge.net/projects/e1000/files/i40e%20stable/1.3.47/
+
+Files: *
+Copyright: Copyright(c) 2013 - 2015 Intel Corporation.
+License: GPL-2+
+
+License: GPL-2+
+ This program is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ .
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE.  See the GNU General Public License for more
+ details.
+ .
+ You should have received a copy of the GNU General Public
+ License along with this package; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ Boston, MA  02110-1301 USA
+ .
+ On Debian systems, the full text of the GNU General Public
+ License version 2 can be found in the file
+ `/usr/share/common-licenses/GPL-2'.
+
diff --git a/debian/dirs b/debian/dirs
new file mode 100755 (executable)
index 0000000..b601f22
--- /dev/null
@@ -0,0 +1 @@
+usr/src
diff --git a/debian/postinst b/debian/postinst
new file mode 100755 (executable)
index 0000000..80bb73d
--- /dev/null
@@ -0,0 +1,49 @@
+#!/bin/sh
+# Copyright (C) 2002-2005 Flavio Stanchina
+# Copyright (C) 2005-2006 Aric Cyr
+# Copyright (C) 2007 Mario Limonciello
+# Copyright (C) 2009 Alberto Milone
+
+set -e
+
+NAME=i40e
+PACKAGE_NAME=$NAME-dkms
+DEB_NAME=$(echo $PACKAGE_NAME | sed 's,_,-,')
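+# Upstream version only: strip any epoch (before ':') and any Debian
+# revision (after '-') from the installed package version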
+CVERSION=`dpkg-query -W -f='${Version}' $DEB_NAME | awk -F "-" '{print $1}' | cut -d\: -f2`
+ARCH=`dpkg --print-architecture`
+
+dkms_configure () {
+       for POSTINST in /usr/lib/dkms/common.postinst "/usr/share/$PACKAGE_NAME/postinst"; do
+               if [ -f "$POSTINST" ]; then
+                       "$POSTINST" "$NAME" "$CVERSION" "/usr/share/$PACKAGE_NAME" "$ARCH" "$2"
+                       return $?
+               fi
+               echo "WARNING: $POSTINST does not exist." >&2
+       done
+       echo "ERROR: DKMS version is too old and $PACKAGE_NAME was not" >&2
+       echo "built with legacy DKMS support." >&2
+       echo "You must either rebuild $PACKAGE_NAME with legacy postinst" >&2
+       echo "support or upgrade DKMS to a more current version." >&2
+       return 1
+}
+
+case "$1" in
+       configure)
+               dkms_configure
+       ;;
+
+       abort-upgrade|abort-remove|abort-deconfigure)
+       ;;
+
+       *)
+               echo "postinst called with unknown argument \`$1'" >&2
+               exit 1
+       ;;
+esac
+
+# dh_installdeb will replace this with shell code automatically
+# generated by other debhelper scripts.
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/prerm b/debian/prerm
new file mode 100755 (executable)
index 0000000..7254308
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+NAME=i40e
+VERSION=1.3.47
+
+set -e
+
+case "$1" in
+    remove|upgrade|deconfigure)
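+      # Only remove the module if DKMS currently knows about it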
+      if [  "`dkms status -m $NAME`" ]; then
+         dkms remove -m $NAME -v $VERSION --all
+      fi
+    ;;
+
+    failed-upgrade)
+    ;;
+
+    *)
+        echo "prerm called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
+esac
+
+#DEBHELPER#
+
+exit 0
+
+
diff --git a/debian/rules b/debian/rules
new file mode 100755 (executable)
index 0000000..d2a9f0f
--- /dev/null
@@ -0,0 +1,54 @@
+#!/usr/bin/make -f
+# -*- makefile -*-
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+DEB_NAME=i40e
+NAME=i40e
+VERSION=1.3.47
+
+configure: configure-stamp
+configure-stamp:
+       dh_testdir
+       touch configure-stamp
+
+
+build: build-stamp
+
+build-stamp: configure-stamp 
+       dh_testdir
+       $(MAKE)
+       touch $@
+
+clean:
+       dh_testdir
+       dh_testroot
+       rm -f build-stamp configure-stamp
+       -$(MAKE) clean
+       dh_clean
+
+install: build
+       dh_testdir
+       dh_testroot
+       dh_prep
+       dh_installdirs
+       $(MAKE) DESTDIR=$(CURDIR)/debian/$(DEB_NAME)-dkms NAME=$(NAME) VERSION=$(VERSION) install
+
+binary-arch: build install
+
+binary-indep: build install
+       dh_testdir
+       dh_testroot
+       dh_link
+       dh_strip
+       dh_compress
+       dh_fixperms
+       dh_installdeb
+       dh_shlibdeps
+       dh_gencontrol
+       dh_md5sums
+       dh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install configure
diff --git a/i40e-dkms-1.3.47/Makefile b/i40e-dkms-1.3.47/Makefile
new file mode 100644 (file)
index 0000000..57e13d8
--- /dev/null
@@ -0,0 +1,28 @@
+#!/usr/bin/make
+SRC = $(DESTDIR)/usr/src
+SHARE = $(DESTDIR)/usr/share/$(NAME)-dkms
+
+all:
+
+clean:
+
+install:
+
+#source tree
+ifeq ("$(wildcard $(NAME)-$(VERSION))", "$(NAME)-$(VERSION)")
+       install -d "$(SRC)"
+       cp -a $(NAME)-$(VERSION) $(SRC)
+       chmod 644 -R "$(SRC)/$(NAME)-$(VERSION)"
+endif
+
+#tarball, possibly with binaries
+ifeq ("$(wildcard $(NAME)-$(VERSION).dkms.tar.gz)", "$(NAME)-$(VERSION).dkms.tar.gz")
+       install -d "$(SHARE)"
+       install -m 644 $(NAME)-$(VERSION).dkms.tar.gz "$(SHARE)"
+endif
+
+#postinst, only if we are supporting legacy mode
+ifeq ("$(wildcard common.postinst)", "common.postinst")
+       install -d "$(SHARE)"
+       install -m 755 $(PREFIX)/usr/lib/dkms/common.postinst $(SHARE)/postinst
+endif
diff --git a/i40e-dkms-1.3.47/common.postinst b/i40e-dkms-1.3.47/common.postinst
new file mode 100755 (executable)
index 0000000..d8cf3a7
--- /dev/null
@@ -0,0 +1,295 @@
+#!/bin/sh
+# Copyright (C) 2002-2005 Flavio Stanchina
+# Copyright (C) 2005-2006 Aric Cyr
+# Copyright (C) 2007 Mario Limonciello
+# Copyright (C) 2009 Alberto Milone
+
+set -e
+
+uname_s=$(uname -s)
+
+_get_kernel_dir() {
+    KVER=$1
+    case ${uname_s} in
+       Linux)          DIR="/lib/modules/$KVER/build" ;;
+       GNU/kFreeBSD)   DIR="/usr/src/kfreebsd-headers-$KVER/sys" ;;
+    esac
+    echo $DIR
+}
+
+_check_kernel_dir() {
+    DIR=$(_get_kernel_dir $1)
+    case ${uname_s} in
+       Linux)          test -e $DIR/include ;;
+       GNU/kFreeBSD)   test -e $DIR/kern && test -e $DIR/conf/kmod.mk ;;
+       *)              return 1 ;;
+    esac
+    return $?
+}
+
+# Check the existence of a kernel named as $1
+_is_kernel_name_correct() {
+    CORRECT="no"
+    KERNEL_NAME=$1
+
+    for kernel in /boot/config-*; do
+        KERNEL=${kernel#*-}
+        if [ "${KERNEL}" = "${KERNEL_NAME}" ]; then
+            CORRECT="yes"
+            break
+        fi
+    done
+
+    echo $CORRECT
+}
+
+
+# Get the most recent kernel on Debian based systems. This keeps
+# into account both the version and the ABI. If the current kernel
+# is the most recent kernel then the function will print a null string.
+_get_newest_kernel_debian() {
+    NEWEST_KERNEL=
+    NEWEST_VERSION=
+    NEWEST_ABI=
+
+    for kernel in /boot/config-*; do
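+        # e.g. /boot/config-3.13.0-77-generic (illustrative name) gives
+        # KERNEL=3.13.0-77-generic, KERNEL_VERSION=3.13.0 and ABI=77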
+        KERNEL=${kernel#*-}
+        KERNEL_VERSION=${KERNEL%%-*}
+        ABI=${KERNEL#*-}
+        ABI=${ABI%%-*}
+
+        if [ -z "$NEWEST_KERNEL" ]; then
+            # The 1st time get a version which is bigger than $1
+            COMPARE_TO=$1
+        else
+            # Get the biggest version
+            COMPARE_TO="$NEWEST_VERSION-$NEWEST_ABI"
+        fi
+
+        # if $kernel is greater than $COMPARE_TO
+        if [ `dpkg --compare-versions "$KERNEL_VERSION-$ABI" gt "$COMPARE_TO" && echo "yes" || \
+              echo "no"` = "yes" ]; then
+            NEWEST_KERNEL=$KERNEL
+            NEWEST_VERSION=$KERNEL_VERSION
+            NEWEST_ABI=$ABI
+        fi
+    done
+
+    echo "$NEWEST_KERNEL"
+}
+
+# Get the most recent kernel in Rhel based systems. If the current kernel
+# is the most recent kernel then the function will print a null string.
+_get_newest_kernel_rhel() {
+    NEWEST_KERNEL=
+
+    LAST_INSTALLED_KERNEL=$(rpm -q --whatprovides kernel  --last | grep kernel -m1 | cut -f1 -d' ')
+
+    LIK_FORMATTED_NAME=$(rpm -q $LAST_INSTALLED_KERNEL --queryformat="%{VERSION}-%{RELEASE}.%{ARCH}\n")
+
+    if [ `echo $LIK_FORMATTED_NAME | grep 2.6 >/dev/null` ]; then
+        # Fedora and Suse
+        NEWEST_KERNEL=$LIK_FORMATTED_NAME
+    else
+        # Hack for Mandriva where $LIK_FORMATTED_NAME is broken
+        LIK_NAME=$(rpm -q $LAST_INSTALLED_KERNEL --queryformat="%{NAME}\n")
+        LIK_TYPE=${LIK_NAME#kernel-}
+        LIK_TYPE=${LIK_TYPE%%-*}
+        LIK_STRIPPED=${LIK_NAME#kernel-}
+        LIK_STRIPPED=${LIK_STRIPPED#$LIK_TYPE-}
+        LIK_STRIPPED_BASE=${LIK_STRIPPED%%-*}
+        LIK_STRIPPED_END=${LIK_STRIPPED#$LIK_STRIPPED_BASE-}
+        LIK_FINAL=$LIK_STRIPPED_BASE-$LIK_TYPE-$LIK_STRIPPED_END
+
+        NEWEST_KERNEL=$LIK_FINAL
+    fi
+
+    echo $NEWEST_KERNEL
+}
+
+# Get the newest kernel on Debian and Rhel based systems.
+get_newest_kernel() {
+    NEWEST_KERNEL=
+    # Try Debian first as rpm can be installed in Debian based distros
+    if [ -e /usr/bin/dpkg ]; then
+        # If DEB based
+        CURRENT_KERNEL=$1
+        CURRENT_VERSION=${CURRENT_KERNEL%%-*}
+        CURRENT_ABI=${CURRENT_KERNEL#*-}
+        CURRENT_FLAVOUR=${CURRENT_ABI#*-}
+        CURRENT_ABI=${CURRENT_ABI%%-*}
+        NEWEST_KERNEL=$(_get_newest_kernel_debian "$CURRENT_VERSION-$CURRENT_ABI")
+
+    elif [ `which rpm >/dev/null` ]; then
+        # If RPM based
+        NEWEST_KERNEL=$(_get_newest_kernel_rhel)
+    fi
+
+    # Make sure that kernel name that we extracted corresponds to an installed
+    # kernel
+    if [ -n "$NEWEST_KERNEL" ] && [ `_is_kernel_name_correct $NEWEST_KERNEL` = "no" ]; then
+        NEWEST_KERNEL=
+    fi
+
+    echo $NEWEST_KERNEL
+}
+
+NAME=$1
+VERSION=$2
+TARBALL_ROOT=$3
+ARCH=$4
+UPGRADE=$5
+
+if [ -z "$NAME" ] || [ -z "$VERSION" ]; then
+    echo "Need NAME, and VERSION defined"
+    echo "ARCH is optional"
+    exit 1
+fi
+
+KERNELS=$(ls /lib/modules/)
+CURRENT_KERNEL=$(uname -r)
+
+#We never want to keep an older version side by side to prevent conflicts
+if [ -e "/var/lib/dkms/$NAME/$VERSION" ]; then
+    echo "Removing old $NAME-$VERSION DKMS files..."
+    dkms remove -m $NAME -v $VERSION --all
+fi
+
+#Load new files, by source package and by tarball
+if [ -f "$TARBALL_ROOT/$NAME-$VERSION.dkms.tar.gz" ]; then
+    if ! dkms ldtarball --archive "$TARBALL_ROOT/$NAME-$VERSION.dkms.tar.gz"; then
+        echo ""
+        echo ""
+        echo "Unable to load DKMS tarball $TARBALL_ROOT/$NAME-$VERSION.dkms.tar.gz."
+        echo "Common causes include: "
+        echo " - You must be using DKMS 2.1.0.0 or later to support binaries only"
+        echo "   distribution specific archives."
+        echo " - Corrupt distribution specific archive"
+        echo ""
+        echo ""
+        exit 2
+    fi
+elif [ -d "/usr/src/$NAME-$VERSION" ]; then
+    echo "Loading new $NAME-$VERSION DKMS files..."
+    dkms add -m $NAME -v $VERSION > /dev/null
+fi
+
+# On 1st installation, let us look for a directory
+# in /lib/modules which matches `uname -r`. If none
+# is found it is possible that buildd is being used
+# and that uname -r is giving us the name of the
+# kernel used by the buildd machine.
+#
+# If this is the case we try to build the kernel
+# module for each kernel which has a directory in
+# /lib/modules. Furthermore we will have to tell
+# DKMS which architecture it should build the module
+# for (e.g. if the buildd machine is using a
+# 2.6.24-23-xen 64bit kernel).
+#
+# NOTE: if the headers are not installed then the
+#       module won't be built, as usual
+if [ -z "$UPGRADE" ]; then
+    echo "First Installation: checking all kernels..."
+    for KERNEL in $KERNELS; do
+        if [ ${KERNEL} = ${CURRENT_KERNEL} ]; then
+            # Kernel found
+            KERNELS=$CURRENT_KERNEL
+            break
+        fi
+    done
+else
+    KERNELS=$CURRENT_KERNEL
+fi
+
+# Here we look for the most recent kernel so that we can
+# build the module for it (in addition to doing it for the
+# current kernel).
+NEWEST_KERNEL=$(get_newest_kernel "$KERNELS")
+
+# If the current kernel doesn't come from the host of a chroot
+if [ `_is_kernel_name_correct $CURRENT_KERNEL` = "yes" ]; then
+    # See if it's worth building the module for both the newest kernel
+    # and for the current kernel
+    if [ -n "$NEWEST_KERNEL" ] && [ ${CURRENT_KERNEL} != ${NEWEST_KERNEL} ]; then
+        echo "Building for $CURRENT_KERNEL and $NEWEST_KERNEL"
+        KERNELS="$CURRENT_KERNEL $NEWEST_KERNEL"
+    else
+        echo "Building only for $CURRENT_KERNEL"
+    fi
+# The current kernel is not useful as it's the host's
+else
+    echo "It is likely that $CURRENT_KERNEL belongs to a chroot's host"
+
+    # Let's use only the newest kernel
+    if [ -n "$NEWEST_KERNEL" ]; then
+        KERNELS="$NEWEST_KERNEL"
+        echo "Building only for $NEWEST_KERNEL"
+    fi
+fi
+
+if [ -n "$ARCH" ]; then
+    if which lsb_release >/dev/null && [ $(lsb_release -s -i) = "Ubuntu" ]; then
+        case $ARCH in
+            amd64)
+                ARCH="x86_64"
+                ;;
+            lpia|i?86)
+                ARCH="i686"
+                ;;
+        esac
+    fi
+    echo "Building for architecture $ARCH"
+    ARCH="-a $ARCH"
+fi
+
+for KERNEL in $KERNELS; do
+    dkms_status=`dkms status -m $NAME -v $VERSION -k $KERNEL $ARCH`
+    if [ `echo $KERNEL | grep -c "BOOT"` -gt 0 ]; then
+        echo ""
+        echo "Module build and install for $KERNEL was skipped as "
+        echo "it is a BOOT variant"
+        continue
+    fi
+
+
+    #if the module isn't yet built, try to build it
+    if [ `echo $dkms_status | grep -c ": built"` -eq 0 ]; then
+        if [ ! -L /var/lib/dkms/$NAME/$VERSION/source ]; then
+            echo "This package appears to be a binaries-only package"
+            echo " you will not be able to build against kernel $KERNEL"
+            echo " since the package source was not provided"
+            continue
+        fi
+        if _check_kernel_dir $KERNEL; then
+            echo "Building initial module for $KERNEL"
+            set +e
+            dkms build -m $NAME -v $VERSION -k $KERNEL $ARCH > /dev/null
+            case $? in
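+            # dkms signals a skipped build with exit status 9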
+            9)
+                set -e
+                echo "Skipped."
+                continue
+                ;;
+            0)
+                set -e
+                echo "Done."
+                ;;
+            *)
+                exit $?
+                ;;
+            esac
+            dkms_status=`dkms status -m $NAME -v $VERSION -k $KERNEL $ARCH`
+        else
+            echo "Module build for the currently running kernel was skipped since the"
+            echo "kernel source for this kernel does not seem to be installed."
+        fi
+    fi
+
+    #if the module is built (either pre-built or just now), install it
+    if [ `echo $dkms_status | grep -c ": built"` -eq 1 ] && 
+       [ `echo $dkms_status | grep -c ": installed"` -eq 0 ]; then
+        dkms install -m $NAME -v $VERSION -k $KERNEL $ARCH
+    fi
+done
+
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/COPYING b/i40e-dkms-1.3.47/i40e-1.3.47/COPYING
new file mode 100644 (file)
index 0000000..e2fed1b
--- /dev/null
@@ -0,0 +1,344 @@
+
+"This software program is licensed subject to the GNU General Public License 
+(GPL). Version 2, June 1991, available at 
+<http://www.gnu.org/licenses/gpl-2.0.html>"
+
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/README b/i40e-dkms-1.3.47/i40e-1.3.47/README
new file mode 100755 (executable)
index 0000000..9e82a61
--- /dev/null
@@ -0,0 +1,934 @@
+
+i40e Linux* Base Driver for the Intel(R) XL710 Ethernet Controller Family
+===============================================================================
+
+===============================================================================
+
+September 25, 2015
+
+===============================================================================
+
+Contents
+--------
+
+- Overview
+- Identifying Your Adapter
+- Building and Installation
+- Command Line Parameters
+- Intel(R) i40e Ethernet Flow Director
+- Additional Features & Configurations
+- Known Issues
+
+
+================================================================================
+
+
+Important Notes
+---------------
+
+Configuring SR-IOV for improved network security
+------------------------------------------------
+
+In a virtualized environment, on Intel(R) Server Adapters that support SR-IOV,
+the virtual function (VF) may be subject to malicious behavior. Software-
+generated layer two frames, like IEEE 802.3x (link flow control), IEEE 802.1Qbb
+(priority based flow-control), and others of this type, are not expected and
+can throttle traffic between the host and the virtual switch, reducing
+performance. To resolve this issue, configure all SR-IOV enabled ports for
+VLAN tagging. This configuration allows unexpected, and potentially malicious,
+frames to be dropped.
+
+
+
+Overview
+--------
+
+This document describes the i40e Linux* Base Driver for the XL710 Ethernet Controller Family of Adapters.
+
+The Linux* base driver supports the following kernel versions:
+2.6.32 and newer
+
+It includes support for Linux supported x86_64 systems.
+
+This driver is only supported as a loadable module at this time. Intel is
+not supplying patches against the kernel source to allow for static linking of
+the drivers.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel adapter. All hardware requirements listed apply to
+use with Linux.
+
+The following features are now available in supported kernels:
+- Native VLANs
+- Channel Bonding (teaming)
+- SNMP
+- Generic Receive Offload
+
+Adapter teaming is implemented using the native Linux Channel bonding
+module. This is included in supported Linux kernels.
+Channel Bonding documentation can be found in the Linux kernel source:
+Documentation/networking/bonding.txt
+
+The driver information previously displayed in the /proc file system is not
+supported in this release.
+
+Driver information can be obtained using ethtool, lspci, and ifconfig.
+Instructions on updating ethtool can be found in the Additional Features
+and Configurations section later in this document.
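+
+For example, to print the driver name and version in use for an interface:
+   # ethtool -i ethX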
+
+
+
+Identifying Your Adapter
+------------------------
+The driver in this release is compatible with XL710 and X710-based Intel
+Ethernet Network Connections.
+
+For information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
+http://support.intel.com/support/go/network/adapter/proidguide.htm
+
+For the best performance, make sure the latest NVM/FW is installed on your
+device and that you are using the newest drivers.
+
+For the latest NVM/FW images and Intel network drivers, refer to the
+following website and select your adapter.
+http://www.intel.com/support
+
+
+SFP+ Devices with Pluggable Optics
+----------------------------------
+
+SR Modules
+----------
+  Intel        DUAL RATE 1G/10G SFP+ SR (bailed)       E10GSFPSR
+
+LR Modules
+---------- 
+  Intel        DUAL RATE 1G/10G SFP+ LR (bailed)       E10GSFPLR
+
+1G SFP Modules
+--------------
+The following is a list of 3rd party SFP modules that have received some
+testing. Not all modules are applicable to all devices.
+
+Supplier       Type            Part Numbers
+Finisar                1000BASE-T      SFP FCLF-8251-3
+Kinnex A       1000BASE-T      SFP XSFP-T-RJ12-0101-DLL
+Avago          1000BASE-T      SFP ABCU-5710RZ
+
+QSFP+ Modules
+-------------
+  Intel        TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed)        E40GQSFPSR
+    QSFP+ 1G speed is not supported on XL710 based devices.
+
+X710/XL710 Based SFP+ adapters support passive QSFP+ Direct Attach cables.
+Intel recommends using Intel optics and cables. Other modules may function
+but are not validated by Intel. Contact Intel for supported media types.
+
+
+================================================================================
+
+
+Building and Installation
+-------------------------
+
+To build a binary RPM* package of this driver, run 'rpmbuild -tb
+i40e-<x.x.x>.tar.gz', where <x.x.x> is the version number for the driver tar file.
+
+NOTES:
+
+- For the build to work properly, the currently running kernel MUST match
+  the version and configuration of the installed kernel sources. If you have
+  just recompiled the kernel, reboot the system before building.
+- RPM functionality has only been tested in Red Hat distributions.
+
+1. Move the base driver tar file to the directory of your choice. For
+   example, use '/home/username/i40e' or '/usr/local/src/i40e'.
+
+2. Untar/unzip the archive, where <x.x.x> is the version number for the
+   driver tar file:
+   tar zxf i40e-<x.x.x>.tar.gz
+
+3. Change to the driver src directory, where <x.x.x> is the version number
+   for the driver tar:
+   cd i40e-<x.x.x>/src/
+
+4. Compile the driver module:
+   # make install
+   The binary will be installed as:
+   /lib/modules/<KERNEL VERSION>/kernel/drivers/net/i40e/i40e.[k]o
+
+   The install location listed above is the default location. This may differ
+   for various Linux distributions.
+
+5. Load the module using the modprobe command:
+   modprobe i40e [parameter=port1_value,port2_value]
+
+   Make sure that any older i40e drivers are removed from the kernel before
+   loading the new module:
+   rmmod i40e; modprobe i40e
+
+6. Assign an IP address to the interface by entering the following,
+   where ethX is the interface name that was shown in dmesg after modprobe:
+   
+   ip address add <IP_address>/<netmask bits> dev ethX
+
+7. Verify that the interface works. Enter the following, where IP_address
+   is the IP address for another machine on the same subnet as the interface
+   that is being tested:
+   ping <IP_address>
+
+NOTE:
+   For certain distributions like (but not limited to) RedHat Enterprise
+   Linux 7 and Ubuntu, once the driver is installed, the initrd/initramfs
+   file may need to be updated to prevent the OS from loading old versions
+   of the i40e driver. The dracut utility may be used on RedHat
+   distributions:
+       # dracut --force
+   For Ubuntu:
+       # update-initramfs -u
+
+
+================================================================================
+
+
+Command Line Parameters
+-----------------------
+In general, ethtool and other OS specific commands are used to configure user
+changeable parameters after the driver is loaded. The i40e driver only supports
+the max_vfs kernel parameter on older kernels that do not have the standard
+sysfs interface. The only other module parameter supported is the debug
+parameter that can control the default logging verbosity of the driver.
+
+If the driver is built as a module, the following optional parameters are used
+by entering them on the command line with the modprobe command using this
+syntax:
+modprobe i40e [<option>=<VAL1>]
+
+There needs to be a <VAL#> for each network port in the system supported by
+this driver. The values will be applied to each instance, in function order.
+For example:
+modprobe i40e max_vfs=7
+
+The default value for each parameter is generally the recommended setting,
+unless otherwise noted.
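+
+As a sketch for the debug parameter mentioned above (accepted values are
+driver specific; "modinfo i40e" shows the parameter description):
+modprobe i40e debug=<level>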
+
+
+
+max_vfs
+-------
+Valid Range:
+1-32 (X710 based devices)
+1-64 (XL710 based devices)
+NOTE: This parameter is only used on kernel 3.7.x and below. On kernel 3.8.x
+and above, use sysfs to enable VFs. For example:
+#echo $num_vf_enabled > /sys/class/net/$dev/device/sriov_numvfs        //enable VFs
+#echo 0 > /sys/class/net/$dev/device/sriov_numvfs      //disable VFs
+This parameter adds support for SR-IOV. It causes the driver to spawn up to
+max_vfs worth of virtual functions.
+Some hardware configurations support fewer SR-IOV instances, as the whole
+XL710 controller (all functions) is limited to 128 SR-IOV interfaces in total.
+NOTE: When SR-IOV mode is enabled, hardware VLAN filtering
+and VLAN tag stripping/insertion will remain enabled. Please remove the old
+VLAN filter before the new VLAN filter is added. For example,
+ip link set eth0 vf 0 vlan 100 // set vlan 100 for VF 0
+ip link set eth0 vf 0 vlan 0   // Delete vlan 100
+ip link set eth0 vf 0 vlan 200 // set a new vlan 200 for VF 0
+
+
+Configuring VLAN tagging on SR-IOV enabled adapter ports
+--------------------------------------------------------
+
+To configure VLAN tagging for the ports on an SR-IOV enabled adapter,
+use the following command. The VLAN configuration should be done 
+before the VF driver is loaded or the VM is booted.
+
+$ ip link set dev <PF netdev id> vf <id> vlan <vlan id>
+
+For example, the following instructions will configure PF eth0 and 
+the first VF on VLAN 10.
+$ ip link set dev eth0 vf 0 vlan 10
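+
+To verify the assignment, "ip link show" lists each VF with its VLAN:
+$ ip link show dev eth0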
+
+Intel(R) Ethernet Flow Director
+-------------------------------
+The Flow Director performs the following tasks:
+
+  - Directs receive packets according to their flows to different queues.
+  - Enables tight control on routing a flow in the platform.
+  - Matches flows and CPU cores for flow affinity.
+  - Supports multiple parameters for flexible flow classification and load
+    balancing.
+
+NOTES:
+
+  - The Flow Director is enabled only if the kernel supports multiple
+    transmit queues.
+  - An included script (set_irq_affinity) automates setting the IRQ to
+    CPU affinity.
+  - The i40e Linux driver does not support configuration of the mask field.
+    It only accepts rules that completely qualify a certain flow type.
+
+ethtool commands:
+
+  - To enable or disable the Flow Director
+
+       # ethtool -K ethX ntuple <on|off>
+
+       When disabling ntuple filters, all the user-programmed filters are flushed
+       from the driver cache and hardware. Filters must be re-added if they are
+       needed when ntuple is re-enabled.
+
+  - To add a filter that directs packets to queue 2, use the -U or -N switch
+
+       # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
+       192.168.10.2 src-port 2000 dst-port 2001 action 2 [loc 1]
+
+  - To see the list of filters currently present
+       # ethtool <-u|-n> ethX
+
+Application Targeted Routing (ATR) Perfect Filters
+--------------------------------------------------
+ATR is enabled by default when the kernel is in multiple transmit queue mode.
+An ATR flow director filter rule is added when a TCP-IP flow starts and is
+deleted when the flow ends. When a TCP-IP Flow Director rule is added from
+ethtool (Sideband filter), ATR is turned off by the driver. To re-enable ATR,
+the sideband can be disabled with the ethtool -K option. If sideband is
+re-enabled after ATR is re-enabled, ATR remains enabled until a TCP-IP flow
+is added. When all TCP-IP sideband rules are deleted, ATR is automatically
+re-enabled.
+
+Packets that match the ATR rules are counted in fdir_atr_match stats in
+ethtool, which also can be used to verify whether ATR rules still exist.
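+
+For example, the counter can be read with the standard ethtool statistics
+query:
+  # ethtool -S ethX | grep fdir_atr_match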
+
+Sideband Perfect Filters
+------------------------
+Sideband Perfect Filters is an interface for loading the filter table that
+funnels all flow into queue_0 unless an alternative queue is specified
+using "action." If action is used, any flow that matches the filter criteria
+will be directed to the appropriate queue. Rules may be deleted from the
+table. This is done via
+
+  ethtool -U ethX delete N
+
+  where N is the rule number to be deleted, as specified in the loc value in
+  the filter add command.
+
+  If the queue is defined as -1, the filter drops matching packets. To account
+  for Sideband filter matches, the fdir_sb_match stats in ethtool can be used.
+
+  In addition, rx-N.rx_packets shows the number of packets processed by the
+  Nth queue.
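+
+  As an illustrative sketch (the address and rule location are examples),
+  a rule that drops all TCP/IPv4 traffic from one source address:
+
+  ethtool -U ethX flow-type tcp4 src-ip 192.168.10.5 action -1 loc 3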
+
+NOTES:
+Receive Packet Steering (RPS) and Receive Flow Steering (RFS) are not compatible
+with Flow Director. If Flow Director is enabled, these will be disabled.
+
+The VLAN field for Flow Director is not explicitly supported in the i40e
+driver.
+
+When filter rules are added from Sideband or ATR and the Flow Director filter
+table is full, the ATR rule is turned off by the driver. Subsequently, the
+Sideband filter rule is then turned off. When space becomes available in the
+filter table through filter rule deletion (i.e., an ATR rule or Sideband rule
+is deleted), the Sideband and ATR rule additions are turned back on.
+
+Occasionally, when the filter table is full, you will notice HW errors when
+you try to add new rules. The i40e driver will call for a filter flush and
+sideband filter list replay. This will help flush any stale ATR rules and
+create space.
+
+
+================================================================================
+
+
+Additional Features and Configurations
+-------------------------------------------
+
+
+Configuring the Driver on Different Distributions
+-------------------------------------------------
+
+Configuring a network driver to load properly when the system is started is
+distribution dependent. Typically, the configuration process involves adding
+an alias line to /etc/modules.conf or /etc/modprobe.conf as well as editing
+other system startup scripts and/or configuration files. Many popular Linux
+distributions ship with tools to make these changes for you. To learn the
+proper way to configure a network device for your system, refer to your
+distribution documentation. If during this process you are asked for the
+driver or module name, the name for the Base Driver is i40e.
+
+
+Viewing Link Messages
+---------------------
+
+Link messages will not be displayed to the console if the distribution is
+restricting system messages. In order to see network driver link messages on
+your console, set the dmesg level to eight by entering the following:
+dmesg -n 8
+
+NOTE: This setting is not saved across reboots.
+
+
+Jumbo Frames
+------------
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit
+(MTU) to a value larger than the default value of 1500.
+
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <x> is the interface number:
+
+   ifconfig eth<x> mtu 9000 up
+
+This setting is not saved across reboots. The setting change can be made
+permanent by adding 'MTU=9000' to the file:
+/etc/sysconfig/network-scripts/ifcfg-eth<x> for RHEL or to the file
+/etc/sysconfig/network/<config_file> for SLES.
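+
+For illustration, a minimal RHEL ifcfg-eth<x> file carrying the persistent
+MTU might look like (device name is an example):
+
+   DEVICE=eth<x>
+   ONBOOT=yes
+   MTU=9000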
+
+NOTES:
+- The maximum MTU setting for Jumbo Frames is 9706. This value coincides
+  with the maximum Jumbo Frames size of 9728 bytes.
+- This driver will attempt to use multiple page sized buffers to receive
+  each jumbo packet. This should help to avoid buffer starvation issues
+  when allocating receive packets.
+
+
+ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The latest
+ethtool version is required for this functionality. Download it at
+http://ftp.kernel.org/pub/software/network/ethtool/
+
+Supported ethtool Commands and Options
+--------------------------------------
+-n --show-nfc
+  Retrieves the receive network flow classification configurations.
+
+rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6
+  Retrieves the hash options for the specified network traffic type.
+
+-N --config-nfc
+  Configures the receive network flow classification.
+
+rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 m|v|t|s|d|f|n|r...
+  Configures the hash options for the specified network traffic type.
+
+  udp4 UDP over IPv4
+  udp6 UDP over IPv6
+
+  f Hash on bytes 0 and 1 of the Layer 4 header of the rx packet.
+  n Hash on bytes 2 and 3 of the Layer 4 header of the rx packet.
+
+
+NAPI
+----
+NAPI (Rx polling mode) is supported in the i40e driver.
+For more information on NAPI, see
+ftp://robur.slu.se/pub/Linux/net-development/NAPI/usenix-paper.tgz.
+
+
+Flow Control
+------------
+
+Ethernet Flow Control (IEEE 802.3x) can be configured with ethtool to enable
+receiving and transmitting pause frames for i40e. When transmit is enabled,
+pause frames are generated when the receive packet buffer crosses a predefined
+threshold. When receive is enabled, the transmit unit will halt for the time
+delay specified when a pause frame is received. 
+
+Flow Control is disabled by default.
+
+Use ethtool to change the flow control settings:
+
+ethtool -A eth? autoneg off rx on tx on
+
+NOTE: You must have a flow control capable link partner.
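+
+The current flow control settings can be displayed with ethtool's pause
+query option:
+
+ethtool -a eth?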
+
+
+MAC and VLAN anti-spoofing feature
+----------------------------------
+
+When a malicious driver attempts to send a spoofed packet, it is dropped by
+the hardware and not transmitted.
+NOTE: This feature can be disabled for a specific Virtual Function (VF):
+  ip link set <pf dev> vf <vf id> spoofchk {off|on}
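+
+For example, to disable spoof checking for VF 0 on a hypothetical PF named
+p5p1:
+
+  ip link set p5p1 vf 0 spoofchk off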
+
+
+Support for UDP RSS
+-------------------
+
+This feature adds an ON/OFF switch for hashing over certain flow types. Only
+UDP can be turned on. The default setting is enabled.
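+
+The switch is exposed through ethtool's rx-flow-hash option. As a sketch,
+assuming a hypothetical interface eth0: including the Layer 4 bytes (f and n)
+enables hashing on the UDP ports, while hashing on IP addresses only (s and
+d) disables it:
+
+  # ethtool -N eth0 rx-flow-hash udp4 sdfn
+  # ethtool -N eth0 rx-flow-hash udp4 sd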
+
+
+IEEE 1588 Precision Time Protocol (PTP) Hardware Clock (PHC)
+------------------------------------------------------------
+
+Precision Time Protocol (PTP) is used to synchronize clocks in a computer
+network and is supported in the i40e driver.
+
+I40E_PTP is a compile-time flag that adds PTP support to the driver. Enable
+it by passing the flag on the make command line when the driver is compiled:
+
+>make CFLAGS_EXTRA="-DI40E_PTP" install
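+
+On kernels with time stamping support, the device's PTP capabilities can then
+be inspected with ethtool (ethX is a placeholder):
+
+>ethtool -T ethX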
+
+
+VXLAN Overlay HW Offloading
+---------------------------
+
+The i40e Linux driver features VXLAN Overlay HW Offloading support, and it
+is enabled by default. To view and configure VXLAN on a VXLAN-overlay
+offload enabled device, use the following commands:
+
+  # ethtool -k ethX
+   (This command displays the offloads and their current state.)
+  # ethtool -K ethX tx-udp_tnl-segmentation [off|on]
+   (This enables/disables VXLAN support in the driver.)
+
+For more information on configuring your network for VXLAN overlay support,
+refer to the Intel Technical Brief, "Creating Overlay Networks Using Intel
+Ethernet Converged Network Adapters" (Intel Networking Division, August 2013):
+
+http://www.intel.com/content/dam/www/public/us/en/documents/technology-briefs/
+overlay-networks-using-converged-network-adapters-brief.pdf
+
+
+Multiple Functions per Port
+---------------------------
+
+On X710/XL710 based adapters that support it, you can set up multiple functions
+on each physical port. You configure these functions through the System
+Setup/BIOS.
+
+Minimum TX Bandwidth is the guaranteed minimum data transmission bandwidth, as
+a percentage of the full physical port link speed, that the partition will
+receive. The bandwidth the partition is awarded will never fall below the level
+you specify here.
+
+The range for the minimum bandwidth values is:
+1 to ((100 - number of partitions on the physical port) + 1)
+For example, if a physical port has 4 partitions, the range would be
+1 to 97, since (100 - 4) + 1 = 97.
+
+The Maximum Bandwidth percentage represents the maximum transmit
+bandwidth allocated to the partition as a percentage of the full physical port
+link speed. The accepted range of values is 1-100. The value can be used as a
+limiter, should you choose that any one particular function not be able to
+consume 100% of a port's bandwidth (should it be available). The sum of
+all the values for Maximum Bandwidth is not restricted, because no more than
+100% of a port's bandwidth can ever be used.
+
+Once the initial configuration is complete, you can set different
+bandwidth allocations on each function as follows:
+1. Make a new directory named /config
+2. Edit /etc/fstab to include:
+
+       configfs /config configfs defaults
+
+3. Mount /config
+4. Load (or reload) the i40e driver
+5. Make a new directory under config for each partition upon which you wish
+   to configure the bandwidth.
+6. Three files will appear under the config/partition directory:
+   - max_bw
+   - min_bw
+   - commit
+   Read from max_bw to display the current maximum bandwidth setting.
+   Write to max_bw to set the maximum bandwidth for this function.
+   Read from min_bw to display the current minimum bandwidth setting.
+   Write to min_bw to set the minimum bandwidth for this function.
+   Write a '1' to commit to save your changes.
+
+Notes: - commit is write only. Attempting to read it will result in an
+         error.
+       - Writing to commit is only supported on the first function of
+         a given port. Writing to a subsequent function will result in an
+         error.
+       - Oversubscribing the minimum bandwidth is not supported. The
+         underlying device's NVM will set the minimum bandwidth to supported
+         values in an indeterminate manner. Remove all of the directories
+         under config and recreate them to see what the actual values are.
+       - To unload the driver you must first remove the directories created
+         in step 5, above.
+
+Example of setting the minimum and maximum bandwidth (assume there are four
+functions on the port, eth6-eth9, and that eth6 is the first function on
+the port):
+
+ # mkdir /config/eth6
+ # mkdir /config/eth7
+ # mkdir /config/eth8
+ # mkdir /config/eth9
+
+ # echo 50 > /config/eth6/min_bw
+ # echo 100 > /config/eth6/max_bw
+ # echo 20 > /config/eth7/min_bw
+ # echo 100 > /config/eth7/max_bw
+ # echo 20 > /config/eth8/min_bw
+ # echo 100 > /config/eth8/max_bw
+ # echo 10 > /config/eth9/min_bw
+ # echo 25 > /config/eth9/max_bw
+
+ # echo 1 > /config/eth6/commit
+
+
+Data Center Bridging (DCB)
+--------------------------
+DCB is a Quality of Service (QoS) implementation in hardware. It uses the
+VLAN priority tag (802.1p) to filter traffic, which means traffic can be
+filtered into 8 different priorities. It also enables priority flow control
+(802.1Qbb), which can limit or eliminate the number of dropped packets during
+network stress. Bandwidth can be allocated to each of these priorities, which
+is enforced at the hardware level (802.1Qaz).
+
+Adapter firmware implements LLDP and DCBX protocol agents as per 802.1AB
+and 802.1Qaz respectively. The firmware based DCBX agent runs in willing
+mode only and can accept settings from a DCBX capable peer. Software
+configuration of DCBX parameters via dcbtool/lldptool is not supported.
+
+The i40e driver implements the DCB netlink interface layer to allow
+user-space to communicate with the driver and query DCB configuration for
+the port.
+
+
+Interrupt Rate Limiting
+-----------------------
+
+The Intel(R) Ethernet Controller XL710 family supports an interrupt rate
+limiting mechanism. The user can control, via ethtool, the number of
+microseconds between interrupts.
+
+Syntax:
+# ethtool -C ethX rx-usecs-high N
+
+Valid Range: 0-235 (0=no limit)
+
+The range of 0-235 microseconds provides an effective range of 4,310 to
+250,000 interrupts per second. The value of rx-usecs-high can be set
+independently of rx-usecs and tx-usecs in the same ethtool command, and
+is also independent of the adaptive interrupt moderation algorithm. The
+underlying hardware supports granularity in 4-microsecond intervals, so
+adjacent values may result in the same interrupt rate.
+
+One possible use case is the following:
+# ethtool -C ethX adaptive-rx off adaptive-tx off rx-usecs-high 20 rx-usecs 5
+tx-usecs 5
+
+The above command would disable adaptive interrupt moderation, and allow a
+maximum of 5 microseconds before indicating a receive or transmit was complete.
+However, instead of resulting in as many as 200,000 interrupts per second, it
+limits total interrupts per second to 50,000 via the rx-usecs-high parameter.
+
+
+Performance Optimization
+------------------------
+
+Driver defaults are meant to fit a wide variety of workloads, but if further
+optimization is required we recommend experimenting with the following settings.
+
+Pin the adapter's IRQs to specific cores by disabling the irqbalance service
+and using the included set_irq_affinity script.
+The following settings will distribute the IRQs across all the cores evenly:
+  # scripts/set_irq_affinity -x all <interface1> [ <interface2> ... ]
+The following settings will distribute the IRQs across all the cores that are
+local to the adapter (same NUMA node):
+  # scripts/set_irq_affinity -x local <interface1> [ <interface2> ... ]
+Please see the script's help text for further options.
+
+For very CPU intensive workloads, we recommend pinning the IRQs to all cores.
+For IP Forwarding: Disable Adaptive ITR and lower rx and tx interrupts per
+queue using ethtool.
+# ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 125 tx-usecs 125
+Setting rx-usecs and tx-usecs to 125 will limit interrupts to about 8000
+interrupts per second per queue.
+
+For lower CPU utilization: Disable Adaptive ITR and lower rx and tx interrupts
+per queue using ethtool.
+# ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 250 tx-usecs 250
+Setting rx-usecs and tx-usecs to 250 will limit interrupts to about 4000
+interrupts per second per queue.
+
+For lower latency: Disable Adaptive ITR and ITR by setting rx and tx to 0
+using ethtool.
+# ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 0 tx-usecs 0
+
+
+================================================================================
+
+
+Known Issues/Troubleshooting
+----------------------------
+
+
+Incomplete messages in the system log
+-------------------------------------
+
+The NVMUpdate utility may write several incomplete messages in the system log.
+These messages take the form:
+  in the driver Pci Ex config function byte index 114
+  in the driver Pci Ex config function byte index 115
+These messages can be ignored.
+
+
+Bad checksum counter incorrectly increments when using VxLAN
+------------------------------------------------------------
+
+When passing non-UDP traffic over a VxLAN interface, the port.rx_csum_bad
+counter increments for the packets.
+
+
+Virtual machine does not get link
+---------------------------------
+
+If the virtual machine has more than one virtual port assigned to it, and those
+virtual ports are bound to different physical ports, you may not get link on all
+of the virtual ports. The following command may work around the issue:
+ethtool -r <PF>
+Where <PF> is the PF interface in the host, for example: p5p1. You may need to
+run the command more than once to get link on all virtual ports.
+
+
+MAC address of Virtual Function changes unexpectedly
+----------------------------------------------------
+
+If a Virtual Function's MAC address is not assigned in the host, then the
+VF (virtual function) driver will use a random MAC address. This random MAC
+address may change each time the VF driver is reloaded. You can assign a
+static MAC address in the host machine. This static MAC address will survive
+a VF driver reload.
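+
+As a sketch, assuming the PF is p5p1 and using an arbitrary locally
+administered address, VF 0 could be assigned a static MAC as follows:
+
+  ip link set p5p1 vf 0 mac 02:aa:bb:cc:dd:ee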
+
+
+Enabling TSO may cause data integrity issues
+--------------------------------------------
+
+Enabling TSO on kernel 3.14 or newer may cause data integrity issues.
+Kernel 3.10 and older do not exhibit this behavior.
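+
+If you encounter this issue, TSO can be disabled through ethtool (ethX is a
+placeholder):
+
+  ethtool -K ethX tso off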
+
+
+Changing the number of Rx or Tx queues with ethtool -L may cause a kernel panic
+-------------------------------------------------------------------------------
+
+Changing the number of Rx or Tx queues with ethtool -L while traffic is flowing
+and the interface is up may cause a kernel panic. Bring the interface down first
+to avoid the issue. For example:
+  ip link set ethX down
+  ethtool -L ethX combined 4
+
+
+Adding a Flow Director Sideband rule fails incorrectly
+------------------------------------------------------
+
+If you try to add a Flow Director rule when no more sideband rule space is
+available, i40e logs an error that the rule could not be added, but ethtool
+returns success. You can remove rules to free up space. In addition, remove
+the rule that failed. This will evict it from the driver's cache.
+
+
+Flow Director Sideband Logic adds duplicate filter
+--------------------------------------------------
+
+The Flow Director Sideband Logic adds a duplicate filter in the software filter
+list if the location is not specified or the specified location differs from
+the previous location but has the same filter criteria. In this case, the
+second of the two filters that appear is the valid one in hardware and it
+decides the filter action.
+
+
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
+
+Due to the default ARP behavior on Linux, it is not possible to have one
+system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected. All Ethernet interfaces will
+respond to IP traffic for any IP address assigned to the system. This results
+in unbalanced receive traffic.
+
+If you have multiple interfaces in a server, turn on ARP filtering by
+entering:
+echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
+
+This only works if your kernel's version is higher than 2.4.5.
+
+NOTE: This setting is not saved across reboots. The configuration change can
+be made permanent by adding the following line to the file /etc/sysctl.conf:
+net.ipv4.conf.all.arp_filter = 1
+
+Another alternative is to install the interfaces in separate broadcast domains
+(either in different switches or in a switch partitioned to VLANs).
+
+
+UDP Stress Test Dropped Packet Issue
+------------------------------------
+
+Under small packet UDP stress with the i40e driver, the system may
+drop UDP packets due to socket buffers being full. Setting the driver Flow
+Control variables to the minimum may resolve the issue. You may also try
+increasing the kernel's default buffer sizes by changing the values in
+
+  /proc/sys/net/core/rmem_default and rmem_max
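+
+For example, both values could be raised at runtime with sysctl (the sizes
+shown are illustrative only):
+
+  sysctl -w net.core.rmem_default=524288
+  sysctl -w net.core.rmem_max=524288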
+
+
+Unplugging Network Cable While ethtool -p is Running
+----------------------------------------------------
+
+In kernel versions 2.6.32 and newer, unplugging the network cable while
+ethtool -p is running will cause the system to become unresponsive to
+keyboard commands, except for control-alt-delete. Restarting the system
+appears to be the only remedy.
+
+
+Rx Page Allocation Errors
+-------------------------
+
+'Page allocation failure. order:0' errors may occur under stress with kernels
+2.6.25 and newer. This is caused by the way the Linux kernel reports this
+stressed condition.
+
+
+Disable GRO when routing/bridging
+---------------------------------
+
+Due to a known kernel issue, GRO must be turned off when routing/bridging. GRO
+can be turned off via ethtool.
+ethtool -K ethX gro off
+
+where ethX is the ethernet interface being modified.
+
+
+Lower than expected performance
+-------------------------------
+
+Some PCIe x8 slots are actually configured as x4 slots. These slots have
+insufficient bandwidth for full line rate with dual port and quad port
+devices. In addition, if you put a PCIe Generation 3-capable adapter
+into a PCIe Generation 2 slot, you cannot get full bandwidth. The driver
+detects this situation and writes the following message in the system log:
+
+"PCI-Express bandwidth available for this card is not sufficient for optimal
+performance. For optimal performance a x8 PCI-Express slot is required."
+
+If this error occurs, moving your adapter to a true PCIe Generation 3 x8 slot
+will resolve the issue.
+
+
+ethtool may incorrectly display SFP+ fiber module as direct attached cable
+--------------------------------------------------------------------------
+
+Due to kernel limitations, port type can only be correctly displayed on kernel
+2.6.33 or greater.
+
+
+Running ethtool -t ethX command causes break between PF and test client
+-----------------------------------------------------------------------
+
+When there are active VFs, "ethtool -t" performs a full diagnostic. In the
+process, it resets itself and all attached VFs. The VF drivers encounter a
+disruption, but are able to recover.
+
+
+Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS
+under Linux KVM
+------------------------------------------------------------------------
+
+KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
+includes traditional PCIe devices, as well as SR-IOV-capable devices using
+Intel XL710-based controllers.
+
+
+Unable to obtain DHCP lease on boot with RedHat
+-----------------------------------------------
+
+For configurations where the auto-negotiation process takes more than 5
+seconds, the boot script may fail with the following message:
+"ethX: failed. No link present. Check cable?"
+
+If this error appears even though the presence of a link can be confirmed
+using ethtool ethX, try setting "LINKDELAY=5" in
+/etc/sysconfig/network-scripts/ifcfg-ethX.
+
+NOTE: Link time can take up to 30 seconds. Adjust LINKDELAY value accordingly.
+
+Alternatively, NetworkManager can be used to configure the interfaces, which
+avoids the timeout. For instructions on configuring NetworkManager, refer to
+the documentation provided by your distribution.
+
+
+Loading i40e driver in 3.2.x and newer kernels displays kernel tainted message
+------------------------------------------------------------------------------
+
+Due to recent kernel changes, loading an out of tree driver causes the kernel
+to be tainted.
+
+
+================================================================================
+
+
+Support
+-------
+For general information, go to the Intel support website at:
+www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+http://sourceforge.net/projects/i40e
+
+If an issue is identified with the released source code on a supported
+kernel with a supported adapter, email the specific information related to the
+issue to i40e-devel@lists.sf.net.
+
+
+================================================================================
+
+
+License
+-------
+
+This program is free software; you can redistribute it and/or modify it under
+the terms and conditions of the GNU General Public License, version 2, as
+published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
+St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+The full GNU General Public License is included in this distribution in the
+file called "COPYING".
+
+Intel(R) XL710/X710 Network Driver
+Intel(R) XL710/X710 Virtual Function Network Driver
+Copyright(c) 2014-2015 Intel Corporation.
+================================================================================
+
+
+
+Trademarks
+----------
+
+Intel, Itanium, and Pentium are trademarks or registered trademarks of Intel
+Corporation or its subsidiaries in the United States and other countries.
+
+* Other names and brands may be claimed as the property of others.
+
+
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/SUMS b/i40e-dkms-1.3.47/i40e-1.3.47/SUMS
new file mode 100644 (file)
index 0000000..1b96aea
--- /dev/null
@@ -0,0 +1,47 @@
+46789     4 i40e-1.3.47/pci.updates
+63894     4 i40e-1.3.47/src/i40e/i40e_helper.h
+44588     1 i40e-1.3.47/src/i40e/Module.supported
+13919    68 i40e-1.3.47/src/i40e/i40e_adminq_cmd.h
+07159   138 i40e-1.3.47/src/i40e/kcompat.h
+19688    82 i40e-1.3.47/src/i40e/i40e_debugfs.c
+05671    86 i40e-1.3.47/src/i40e/i40e_txrx.c
+29128     6 i40e-1.3.47/src/i40e/i40e_lan_hmc.h
+33481    42 i40e-1.3.47/src/i40e/i40e_nvm.c
+04585    30 i40e-1.3.47/src/i40e/i40e_adminq.c
+33767     2 i40e-1.3.47/src/i40e/Kbuild
+07400     3 i40e-1.3.47/src/i40e/i40e_alloc.h
+30794    66 i40e-1.3.47/src/i40e/i40e_virtchnl_pf.c
+11873     7 i40e-1.3.47/src/i40e/i40e_dcb.h
+60649     2 i40e-1.3.47/src/i40e/i40e_diag.h
+29182    21 i40e-1.3.47/src/i40e/i40e_prototype.h
+29883    23 i40e-1.3.47/src/i40e/i40e_ptp.c
+36071    37 i40e-1.3.47/src/i40e/i40e_dcb.c
+49741     4 i40e-1.3.47/src/i40e/i40e_osdep.h
+00677     9 i40e-1.3.47/src/i40e/i40e_dcb_nl.c
+37534   320 i40e-1.3.47/src/i40e/i40e_main.c
+17556   221 i40e-1.3.47/src/i40e/i40e_register.h
+57597    52 i40e-1.3.47/src/i40e/kcompat.c
+52476     5 i40e-1.3.47/src/i40e/i40e_adminq.h
+50800     6 i40e-1.3.47/src/i40e/i40e_virtchnl_pf.h
+57192    47 i40e-1.3.47/src/i40e/i40e_fcoe.c
+42138     9 i40e-1.3.47/src/i40e/i40e_hmc.h
+30403    52 i40e-1.3.47/src/i40e/i40e_type.h
+64716   105 i40e-1.3.47/src/i40e/i40e_ethtool.c
+22745     4 i40e-1.3.47/src/i40e/i40e_status.h
+60864    13 i40e-1.3.47/src/i40e/i40e_virtchnl.h
+23673     6 i40e-1.3.47/src/i40e/i40e_diag.c
+30055     2 i40e-1.3.47/src/i40e/i40e_devids.h
+23231    11 i40e-1.3.47/src/i40e/i40e_hmc.c
+45761    11 i40e-1.3.47/src/i40e/i40e_configfs.c
+12717   164 i40e-1.3.47/src/i40e/i40e_common.c
+25879    42 i40e-1.3.47/src/i40e/i40e_lan_hmc.c
+65207     5 i40e-1.3.47/src/i40e/i40e_fcoe.h
+10333    31 i40e-1.3.47/src/i40e/i40e.h
+22557    13 i40e-1.3.47/src/i40e/i40e_txrx.h
+48687    11 i40e-1.3.47/src/Makefile
+09576     6 i40e-1.3.47/scripts/set_irq_affinity
+53852     2 i40e-1.3.47/scripts/dump_tables
+02733    18 i40e-1.3.47/COPYING
+52865    10 i40e-1.3.47/i40e.spec
+18539    35 i40e-1.3.47/README
+08612     3 i40e-1.3.47/i40e.7
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/dkms.conf b/i40e-dkms-1.3.47/i40e-1.3.47/dkms.conf
new file mode 100644 (file)
index 0000000..7459e31
--- /dev/null
@@ -0,0 +1,8 @@
+MAKE="make -C src/ KERNELDIR=/lib/modules/${kernelver}/build  BUILD_KERNEL=${kernelver}"
+CLEAN="make -C src/ clean"
+BUILT_MODULE_NAME=i40e
+BUILT_MODULE_LOCATION=src/
+DEST_MODULE_LOCATION="/updates"
+PACKAGE_NAME=i40e-dkms
+PACKAGE_VERSION=1.3.47
+REMAKE_INITRD=yes
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/i40e.7 b/i40e-dkms-1.3.47/i40e-1.3.47/i40e.7
new file mode 100755 (executable)
index 0000000..deab376
--- /dev/null
@@ -0,0 +1,58 @@
+.\" LICENSE
+.\"
+.\" This software program is released under the terms of a license agreement between you ('Licensee') and Intel. Do not use or load this software or any associated materials (collectively, the 'Software') until you have carefully read the full terms and conditions of the LICENSE located in this software package. By loading or using the Software, you agree to the terms of this Agreement. If you do not agree with the terms of this Agreement, do not install or use the Software.
+.\"
+.\" * Other names and brands may be claimed as the property of others.
+.\"
+.
+.TH i40e 7 "February 06, 2015"
+.SH NAME
+i40e \- This file describes the Linux* Base Driver for the Intel Ethernet Controller XL710 Family of Controllers.
+.SH SYNOPSIS
+.PD 0.4v
+modprobe i40e [<option>=<VAL1>,<VAL2>,...]
+.PD 1v
+.SH DESCRIPTION
+This driver is intended for \fB2.6.32\fR and newer kernels. 
+This driver includes support for any 64 bit Linux supported system, 
+including Itanium(R)2, x86_64, PPC64, ARM, etc.
+.LP
+This driver is only supported as a loadable module at this time. Intel is
+not supplying patches against the kernel source to allow for static linking of
+the drivers.
+
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel adapter. All hardware requirements listed apply to
+use with Linux.
+.SH Jumbo Frames
+.LP
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit
+(MTU) to a value larger than the default value of 1500.
+
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <x> is the interface number:
+
+   ifconfig eth<x> mtu 9000 up
+
+.LP
+NOTES:
+- The maximum MTU setting for Jumbo Frames is 9706. This value coincides
+  with the maximum Jumbo Frames size of 9728 bytes.
+- This driver will attempt to use multiple page sized buffers to receive
+  each jumbo packet. This should help to avoid buffer starvation issues
+  when allocating receive packets.
+See the section "Jumbo Frames" in the README.
+.SH SUPPORT
+.LP
+For additional information regarding building and installation,
+see the
+README
+included with the driver.
+For general information, go to the Intel support website at:
+.B www.intel.com/support/
+.LP
+If an issue is identified with the released source code on a supported
+kernel with a supported adapter, email the specific information related to the
+issue to i40e-devel@lists.sf.net.
+.LP
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/i40e.spec b/i40e-dkms-1.3.47/i40e-1.3.47/i40e.spec
new file mode 100644 (file)
index 0000000..f85a2a8
--- /dev/null
@@ -0,0 +1,399 @@
+Name: i40e
+Summary: Intel(R) Ethernet Connection XL710 Linux Driver
+Version: 1.3.47
+Release: 1
+Source: %{name}-%{version}.tar.gz
+Vendor: Intel Corporation
+License: GPL
+ExclusiveOS: linux
+Group: System Environment/Kernel
+Provides: %{name}
+URL: http://www.intel.com/network/connectivity/products/server_adapters.htm
+BuildRoot: %{_tmppath}/%{name}-%{version}-root
+# do not generate debugging packages by default - newer versions of rpmbuild
+# may instead need:
+#%define debug_package %{nil}
+%debug_package %{nil}
+# macros for finding system files to update at install time (pci.ids, pcitable)
+%define find() %(for f in %*; do if [ -e $f ]; then echo $f; break; fi; done)
+%define _pciids   /usr/share/pci.ids        /usr/share/hwdata/pci.ids
+%define _pcitable /usr/share/kudzu/pcitable /usr/share/hwdata/pcitable /dev/null
+%define pciids    %find %{_pciids}
+%define pcitable  %find %{_pcitable}
+Requires: kernel, fileutils, findutils, gawk, bash
+
+%description
+This package contains the Linux driver for the Intel(R) Ethernet Connection XL710 Family of devices.
+
+%prep
+%setup
+
+%build
+make -C src clean
+make -C src
+
+%install
+make -C src INSTALL_MOD_PATH=%{buildroot} MANDIR=%{_mandir} install
+# Append .new to driver name to avoid conflict with kernel RPM
+cd %{buildroot}
+find lib -name "i40e.*o" -exec mv {} {}.new \; \
+         -fprintf %{_builddir}/%{name}-%{version}/file.list "/%p.new\n"
+find lib/modules -name modules.* -exec rm -f {} \;
+
+
+%clean
+rm -rf %{buildroot}
+
+%files -f file.list
+%defattr(-,root,root)
+%{_mandir}/man7/i40e.7.gz
+%doc COPYING
+%doc README
+%doc file.list
+%doc pci.updates
+
+%post
+FL="%{_docdir}/%{name}-%{version}/file.list
+    %{_docdir}/%{name}/file.list"
+FL=$(for d in $FL ; do if [ -e $d ]; then echo $d; break; fi;  done)
+
+if [ -d /usr/local/lib/%{name} ]; then
+       rm -rf /usr/local/lib/%{name}
+fi
+if [ -d /usr/local/share/%{name} ]; then
+       rm -rf /usr/local/share/%{name}
+fi
+
+# Save old drivers (aka .o and .o.gz)
+echo "original pci.ids saved in /usr/local/share/%{name}";
+if [ "%{pcitable}" != "/dev/null" ]; then
+       echo "original pcitable saved in /usr/local/share/%{name}";
+fi
+for k in $(sed 's/\/lib\/modules\/\([0-9a-zA-Z_\.\-]*\).*/\1/' $FL) ; 
+do
+       d_drivers=/lib/modules/$k
+       d_usr=/usr/local/share/%{name}/$k
+       mkdir -p $d_usr
+       cd $d_drivers; find . -name %{name}.*o -exec cp --parents {} $d_usr \; -exec rm -f {} \;
+       cd $d_drivers; find . -name %{name}_*.*o -exec cp --parents {} $d_usr \; -exec rm -f {} \;
+       cd $d_drivers; find . -name %{name}.*o.gz -exec cp --parents {} $d_usr \; -exec rm -f {} \;
+       cd $d_drivers; find . -name %{name}_*.*o.gz -exec cp --parents {} $d_usr \; -exec rm -f {} \;
+       cp --parents %{pciids} /usr/local/share/%{name}/
+       if [ "%{pcitable}" != "/dev/null" ]; then
+               cp --parents %{pcitable} /usr/local/share/%{name}/
+       fi
+done
+
+# Add driver link
+for f in $(sed 's/\.new$//' $FL) ; do
+       ln -f $f.new $f 
+done
+
+# Check if kernel version rpm was built on IS the same as running kernel
+BK_LIST=$(sed 's/\/lib\/modules\/\([0-9a-zA-Z_\.\-]*\).*/\1/' $FL)
+MATCH=no
+for i in $BK_LIST
+do
+       if [ $(uname -r) == $i ] ; then
+               MATCH=yes
+               break
+       fi
+done
+if [ $MATCH == no ] ; then
+       echo -n "WARNING: Running kernel is $(uname -r).  "
+       echo -n "RPM supports kernels (  "
+       for i in $BK_LIST
+       do
+               echo -n "$i  "
+       done
+       echo ")"
+fi
+
+LD="%{_docdir}/%{name}";
+if [ -d %{_docdir}/%{name}-%{version} ]; then
+       LD="%{_docdir}/%{name}-%{version}";
+fi
+
+#Yes, this really needs bash
+bash -s %{pciids} \
+       %{pcitable} \
+       $LD/pci.updates \
+       $LD/pci.ids.new \
+       $LD/pcitable.new \
+       %{name} \
+<<"END"
+#! /bin/bash
+# $1 = system pci.ids file to update
+# $2 = system pcitable file to update
+# $3 = file with new entries in pci.ids file format
+# $4 = pci.ids output file
+# $5 = pcitable output file
+# $6 = driver name for use in pcitable file
+
+exec 3<$1
+exec 4<$2
+exec 5<$3
+exec 6>$4
+exec 7>$5
+driver=$6
+IFS=
+
+# pattern matching strings
+ID="[[:xdigit:]][[:xdigit:]][[:xdigit:]][[:xdigit:]]"
+VEN="${ID}*"
+DEV="  ${ID}*"
+SUB="          ${ID}*"
+TABLE_DEV="0x${ID}     0x${ID} \"*"
+TABLE_SUB="0x${ID}     0x${ID} 0x${ID} 0x${ID} \"*"
+
+line=
+table_line=
+ids_in=
+table_in=
+vendor=
+device=
+ids_device=
+table_device=
+subven=
+ids_subven=
+table_subven=
+subdev=
+ids_subdev=
+table_subdev=
+ven_str=
+dev_str=
+sub_str=
+
+# force a sub-shell to fork with a new stdin
+# this is needed if the shell is reading these instructions from stdin
+while true
+do
+       # get the first line of each data file to jump start things
+       exec 0<&3
+       read -r ids_in
+       if [ "$2" != "/dev/null" ];then
+       exec 0<&4
+       read -r table_in
+       fi
+
+       # outer loop reads lines from the updates file
+       exec 0<&5
+       while read -r line
+       do
+               # vendor entry
+               if [[ $line == $VEN ]]
+               then
+                       vendor=0x${line:0:4}
+                       ven_str=${line#${line:0:6}}
+                       # add entry to pci.ids
+                       exec 0<&3
+                       exec 1>&6
+                       while [[ $ids_in != $VEN ||
+                                0x${ids_in:0:4} < $vendor ]]
+                       do
+                               echo "$ids_in"
+                               read -r ids_in
+                       done
+                       echo "$line"
+                       if [[ 0x${ids_in:0:4} == $vendor ]]
+                       then
+                               read -r ids_in
+                       fi
+
+               # device entry
+               elif [[ $line == $DEV ]]
+               then
+                       device=`echo ${line:1:4} | tr "[:upper:]" "[:lower:]"`
+                       table_device=0x${line:1:4}
+                       dev_str=${line#${line:0:7}}
+                       ids_device=`echo ${ids_in:1:4} | tr "[:upper:]" "[:lower:]"`
+                       table_line="$vendor     $table_device   \"$driver\"     \"$ven_str|$dev_str\""
+                       # add entry to pci.ids
+                       exec 0<&3
+                       exec 1>&6
+                       while [[ $ids_in != $DEV ||
+                                $ids_device < $device ]]
+                       do
+                               if [[ $ids_in == $VEN ]]
+                               then
+                                       break
+                               fi
+                               if [[ $ids_device != ${ids_in:1:4} ]]
+                               then
+                                       echo "${ids_in:0:1}$ids_device${ids_in#${ids_in:0:5}}"
+                               else
+                                       echo "$ids_in"
+                               fi
+                               read -r ids_in
+                               ids_device=`echo ${ids_in:1:4} | tr "[:upper:]" "[:lower:]"`
+                       done
+                       if [[ $device != ${line:1:4} ]]
+                       then
+                               echo "${line:0:1}$device${line#${line:0:5}}"
+                       else
+                               echo "$line"
+                       fi
+                       if [[ $ids_device == $device ]]
+                       then
+                               read -r ids_in
+                       fi
+                       # add entry to pcitable
+                       if [ "$2" != "/dev/null" ];then
+                       exec 0<&4
+                       exec 1>&7
+                       while [[ $table_in != $TABLE_DEV ||
+                                ${table_in:0:6} < $vendor ||
+                                ( ${table_in:0:6} == $vendor &&
+                                  ${table_in:7:6} < $table_device ) ]]
+                       do
+                               echo "$table_in"
+                               read -r table_in
+                       done
+                       echo "$table_line"
+                       if [[ ${table_in:0:6} == $vendor &&
+                             ${table_in:7:6} == $table_device ]]
+                       then
+                               read -r table_in
+                       fi
+                       fi
+               # subsystem entry
+               elif [[ $line == $SUB ]]
+               then
+                       subven=`echo ${line:2:4} | tr "[:upper:]" "[:lower:]"`
+                       subdev=`echo ${line:7:4} | tr "[:upper:]" "[:lower:]"`
+                       table_subven=0x${line:2:4}
+                       table_subdev=0x${line:7:4}
+                       sub_str=${line#${line:0:13}}
+                       ids_subven=`echo ${ids_in:2:4} | tr "[:upper:]" "[:lower:]"`
+                       ids_subdev=`echo ${ids_in:7:4} | tr "[:upper:]" "[:lower:]"`
+                       table_line="$vendor     $table_device   $table_subven   $table_subdev   \"$driver\"     \"$ven_str|$sub_str\""
+                       # add entry to pci.ids
+                       exec 0<&3
+                       exec 1>&6
+                       while [[ $ids_in != $SUB ||
+                                $ids_subven < $subven ||
+                                ( $ids_subven == $subven && 
+                                  $ids_subdev < $subdev ) ]]
+                       do
+                               if [[ $ids_in == $VEN ||
+                                     $ids_in == $DEV ]]
+                               then
+                                       break
+                               fi
+                               if [[ ! (${ids_in:2:4} == "1014" &&
+                                        ${ids_in:7:4} == "052C") ]]
+                               then
+                                       if [[ $ids_subven != ${ids_in:2:4} || $ids_subdev != ${ids_in:7:4} ]]
+                                       then
+                                               echo "${ids_in:0:2}$ids_subven $ids_subdev${ids_in#${ids_in:0:11}}"
+                                       else
+                                               echo "$ids_in"
+                                       fi
+                               fi
+                               read -r ids_in
+                               ids_subven=`echo ${ids_in:2:4} | tr "[:upper:]" "[:lower:]"`
+                               ids_subdev=`echo ${ids_in:7:4} | tr "[:upper:]" "[:lower:]"`
+                       done
+                       if [[ $subven != ${line:2:4} || $subdev != ${line:7:4} ]]
+                       then
+                               echo "${line:0:2}$subven $subdev${line#${line:0:11}}"
+                       else
+                               echo "$line"
+                       fi
+                       if [[ $ids_subven == $subven  &&
+                             $ids_subdev == $subdev ]]
+                       then
+                               read -r ids_in
+                       fi
+                       # add entry to pcitable
+                       if [ "$2" != "/dev/null" ];then
+                       exec 0<&4
+                       exec 1>&7
+                       while [[ $table_in != $TABLE_SUB ||
+                                ${table_in:14:6} < $table_subven ||
+                                ( ${table_in:14:6} == $table_subven &&
+                                  ${table_in:21:6} < $table_subdev ) ]]
+                       do
+                               if [[ $table_in == $TABLE_DEV ]]
+                               then
+                                       break
+                               fi
+                               if [[ ! (${table_in:14:6} == "0x1014" &&
+                                        ${table_in:21:6} == "0x052C") ]]
+                               then
+                                       echo "$table_in"
+                               fi
+                               read -r table_in
+                       done
+                       echo "$table_line"
+                       if [[ ${table_in:14:6} == $table_subven &&
+                             ${table_in:21:6} == $table_subdev ]]
+                       then
+                               read -r table_in
+                       fi
+                       fi
+               fi
+
+               exec 0<&5
+       done
+
+       # print the remainder of the original files
+       exec 0<&3
+       exec 1>&6
+       echo "$ids_in"
+       while read -r ids_in
+       do
+               echo "$ids_in"
+       done
+
+       if [ "$2" != "/dev/null" ];then
+       exec 0>&4
+       exec 1>&7
+       echo "$table_in"
+       while read -r table_in
+       do
+               echo "$table_in"
+       done
+       fi
+
+       break
+done <&5
+
+exec 3<&-
+exec 4<&-
+exec 5<&-
+exec 6>&-
+exec 7>&-
+
+END
+
+mv -f $LD/pci.ids.new  %{pciids}
+if [ "%{pcitable}" != "/dev/null" ]; then
+mv -f $LD/pcitable.new %{pcitable}
+fi
+
+uname -r | grep BOOT || /sbin/depmod -a > /dev/null 2>&1 || true
+
+%preun
+# If doing RPM un-install
+if [ $1 -eq 0 ] ; then
+       FL="%{_docdir}/%{name}-%{version}/file.list
+               %{_docdir}/%{name}/file.list"
+       FL=$(for d in $FL ; do if [ -e $d ]; then echo $d; break; fi;  done)
+
+       # Remove driver link
+       for f in $(sed 's/\.new$//' $FL) ; do
+               rm -f $f
+       done
+
+       # Restore old drivers
+       if [ -d /usr/local/share/%{name} ]; then
+               cd /usr/local/share/%{name}; find . -name '%{name}.*o*' -exec cp --parents {} /lib/modules/ \;
+               cd /usr/local/share/%{name}; find . -name '%{name}_*.*o*' -exec cp --parents {} /lib/modules/ \;
+               rm -rf /usr/local/share/%{name}
+       fi
+fi
+
+%postun
+uname -r | grep BOOT || /sbin/depmod -a > /dev/null 2>&1 || true
+
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/pci.updates b/i40e-dkms-1.3.47/i40e-1.3.47/pci.updates
new file mode 100644 (file)
index 0000000..e0ae83f
--- /dev/null
@@ -0,0 +1,73 @@
+# updates for the system pci.ids file
+#
+# IMPORTANT!  Entries in this list must be sorted as they
+#             would appear in the system pci.ids file.  Entries
+#             are sorted by ven, dev, subven, subdev
+#             (numerical order).
+#
+8086  Intel Corporation
+       154c  XL710/X710 Virtual Function
+       1571  XL710/X710 Virtual Function
+       1572  Ethernet Controller X710 for 10GbE SFP+
+               1028 0000  Ethernet 10G X710 rNDC
+               1028 1f99  Ethernet 10G 4P X710/I350 rNDC
+               1028 1f9c  Ethernet 10G 4P X710 SFP+ rNDC
+               103c 0000  HPE Ethernet 10Gb 562SFP+ Adapter
+               103c 22fc  HPE Ethernet 10Gb 2-port 562FLR-SFP+ Adapter
+               103c 22fd  HPE Ethernet 10Gb 2-port 562SFP+ Adapter
+               1137 0000  Cisco(R) Ethernet Converged NIC X710-4
+               1137 013b  Cisco(R) Ethernet Converged NIC X710-4
+               17aa 0000  Lenovo ThinkServer X710 AnyFabric for 10GbE SFP+
+               17aa 4001  Lenovo ThinkServer X710-4 AnyFabric for 10GbE SFP+
+               17aa 4002  Lenovo ThinkServer X710-2 AnyFabric for 10GbE SFP+
+               8086 0000  Ethernet Converged Network Adapter X710
+               8086 0001  Ethernet Converged Network Adapter X710-4
+               8086 0002  Ethernet Converged Network Adapter X710-4
+               8086 0004  Ethernet Converged Network Adapter X710-4
+               8086 0005  Ethernet Converged Network Adapter X710
+               8086 0006  Ethernet Converged Network Adapter X710
+               8086 0007  Ethernet Converged Network Adapter X710-2
+               8086 0008  Ethernet Converged Network Adapter X710-2
+               8086 0009  Ethernet Controller X710 for 10GbE SFP+ 
+               8086 000a  Ethernet Controller X710 for 10GbE SFP+ 
+               8086 4005  Ethernet Controller X710 for 10GbE SFP+
+               8086 4006  Ethernet Controller X710 for 10GbE SFP+ 
+       1580  Ethernet Controller XL710 for 40GbE backplane
+       1581  Ethernet Controller X710 for 10GbE backplane
+               1028 0000  Ethernet 10G X710-k bNDC
+               1028 1f98  Ethernet 10G 4P X710-k bNDC
+               1028 1f9e  Ethernet 10G 2P X710-k bNDC
+               8086 0000  Ethernet Converged Network Adapter XL710-Q2
+       1583  Ethernet Controller XL710 for 40GbE QSFP+
+               1028 0000  Ethernet 40G 2P XL710 QSFP+ rNDC
+               1028 1f9f  Ethernet 40G 2P XL710 QSFP+ rNDC
+               108e 0000  Oracle Quad 10Gb Ethernet Adapter
+               108e 7b1b  Oracle Quad 10Gb Ethernet Adapter
+               1137 0000  Cisco(R) Ethernet Converged NIC XL710-Q2
+               1137 013c  Cisco(R) Ethernet Converged NIC XL710-Q2
+               8086 0000  Ethernet Converged Network Adapter XL710-Q2
+               8086 0001  Ethernet Converged Network Adapter XL710-Q2
+               8086 0002  Ethernet Converged Network Adapter XL710-Q2
+               8086 0003  Ethernet I/O Module XL710-Q2
+               8086 0004  Ethernet Server Adapter XL710-Q2OCP
+               8086 0006  Ethernet Converged Network Adapter XL710-Q2
+       1584  Ethernet Controller XL710 for 40GbE QSFP+
+               8086 0000  Ethernet Converged Network Adapter XL710-Q1
+               8086 0001  Ethernet Converged Network Adapter XL710-Q1
+               8086 0002  Ethernet Converged Network Adapter XL710-Q1
+               8086 0003  Ethernet I/O Module XL710-Q1
+               8086 0004  Ethernet Server Adapter XL710-Q1OCP
+       1585  Ethernet Controller X710 for 10GbE QSFP+
+       1586  Ethernet Controller X710 for 10GBASE-T
+               108e 0000  Ethernet Controller X710 for 10GBASE-T
+               108e 4857  Ethernet Controller X710 for 10GBASE-T
+       1587  Ethernet Controller XL710 for 20GbE backplane
+               103c 0000  HPE Flex-20 20Gb 2-port 660FLB Adapter
+               103c 22fe  HPE Flex-20 20Gb 2-port 660FLB Adapter
+       1588  Ethernet Controller XL710 for 20GbE backplane
+               103c 0000  HPE Flex-20 20Gb 2-port 660M Adapter
+               103c 22ff  HPE Flex-20 20Gb 2-port 660M Adapter
+       1589  Ethernet Controller X710/X557-AT 10GBASE-T
+               8086 0000  Ethernet Converged Network Adapter X710-T
+               8086 0001  Ethernet Converged Network Adapter X710-T4
+               8086 0002  Ethernet Converged Network Adapter X710-T4
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/scripts/dump_tables b/i40e-dkms-1.3.47/i40e-1.3.47/scripts/dump_tables
new file mode 100755 (executable)
index 0000000..87d510a
--- /dev/null
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Dump Tables script
+# Copyright (C) 2015 Intel Corporation
+#
+# This script is used to generate a dump of the hardware state for
+# sending to linux.nics@intel.com for debugging purposes.  This
+# script will generate a dump_tables.gz output file that can be
+# uploaded or emailed.
+
+# Usage: dump_tables eth1
+
+DEV=$1
+. /sys/class/net/$DEV/device/uevent
+# DRIVER=i40e
+# PCI_CLASS=20000
+# PCI_ID=8086:1583
+# PCI_SUBSYS_ID=8086:0002
+# PCI_SLOT_NAME=0000:06:00.0
+# MODALIAS=pci:v00008086d00001583sv00008086sd00000002bc02sc00i00
+
+if [ "$DEV" == "" ]; then
+       echo Usage: $0 "<i40e ethernet interface>"
+       exit -1
+fi
+
+if [ "$PCI_SLOT_NAME" == "" ]; then
+       echo kernel version `uname -r` is not supported, please report the bug at e1000.sourceforge.net
+       exit -2
+fi
+
+CLUSTER=1
+TABLE=0
+INDEX=0
+
+OUTFILE=`mktemp`
+TMPFILE=`mktemp`
+
+# check for the debugfs directory being mounted
+if [ -d "/sys/kernel/debug/i40e" ]; then
+       echo debugfs found
+else
+       echo -n "mounting debugfs as /sys/kernel/debug: "
+	mount -t debugfs none /sys/kernel/debug && echo Success || { echo Failure ; exit 3; }
+fi
+
+dmesg -c > /dev/null
+until [ "$TABLE" == "0xff" ]; do
+       until [ "$INDEX" == "0xffffffff" ]; do
+               echo dump debug fwdata $CLUSTER $TABLE $INDEX > /sys/kernel/debug/i40e/$PCI_SLOT_NAME/command
+               # check output, exit if no good
+		dmesg | grep -q unknown && { echo error encountered, see log; exit 4; }
+               # store it, without modification
+               dmesg >> $OUTFILE
+               # erase it and prepare for parse
+               dmesg -c > $TMPFILE
+               TABLE=`grep rlen $TMPFILE | sed -e 's/.*next_table=\(.*\) .*\$/\1/'`
+               INDEX=`grep rlen $TMPFILE | sed -e 's/.*next_index=\(.*\)\$/\1/'`
+               echo -n .
+       done
+       INDEX=0
+done
+
+gzip $OUTFILE
+cp $OUTFILE.gz dump_tables.gz
+
+rm $OUTFILE.gz
+rm $TMPFILE
+
+echo Please send the file dump_tables.gz to linux.nics@intel.com or your Intel Support representative.
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/scripts/set_irq_affinity b/i40e-dkms-1.3.47/i40e-1.3.47/scripts/set_irq_affinity
new file mode 100755 (executable)
index 0000000..b362357
--- /dev/null
@@ -0,0 +1,229 @@
+#!/bin/bash
+#
+# Copyright (c) 2014, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#     * Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of Intel Corporation nor the names of its contributors
+#       may be used to endorse or promote products derived from this software
+#       without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Affinitize interrupts to cores
+#
+# typical usage is (as root):
+# set_irq_affinity -x local eth1 <eth2> <eth3>
+#
+# to get help:
+# set_irq_affinity
+
+usage()
+{
+       echo
+       echo "Usage: $0 [-x] {all|local|remote|one|custom} [ethX] <[ethY]>"
+       echo "  options: -x             Configure XPS as well as smp_affinity"
+       echo "  options: {remote|one} can be followed by a specific node number"
+       echo "  Ex: $0 local eth0"
+       echo "  Ex: $0 remote 1 eth0"
+       echo "  Ex: $0 custom eth0 eth1"
+       echo "  Ex: $0 0-7,16-23 eth0"
+       echo
+       exit 1
+}
+
+if [ "$1" == "-x" ]; then
+       XPS_ENA=1
+       shift
+fi
+
+num='^[0-9]+$'
+# Vars
+AFF=$1
+shift
+
+case "$AFF" in
+    remote)    [[ $1 =~ $num ]] && rnode=$1 && shift ;;
+    one)       [[ $1 =~ $num ]] && cnt=$1 && shift ;;
+    all)       ;;
+    local)     ;;
+    custom)    ;;
+    [0-9]*)    ;;
+    -h|--help) usage ;;
+    "")                usage ;;
+    *)         IFACES=$AFF && AFF=all ;;       # Backwards compat mode
+esac
+
+# append the interfaces listed to the string with spaces
+while [ "$#" -ne "0" ] ; do
+       IFACES+=" $1"
+       shift
+done
+
+# for now the user must specify interfaces
+if [ -z "$IFACES" ]; then
+       usage
+       exit 1
+fi
+
+# support functions
+
+set_affinity()
+{
+       VEC=$core
+       if [ $VEC -ge 32 ]
+       then
+               MASK_FILL=""
+               MASK_ZERO="00000000"
+               let "IDX = $VEC / 32"
+               for ((i=1; i<=$IDX;i++))
+               do
+                       MASK_FILL="${MASK_FILL},${MASK_ZERO}"
+               done
+
+               let "VEC -= 32 * $IDX"
+               MASK_TMP=$((1<<$VEC))
+               MASK=$(printf "%X%s" $MASK_TMP $MASK_FILL)
+       else
+               MASK_TMP=$((1<<$VEC))
+               MASK=$(printf "%X" $MASK_TMP)
+       fi
+
+       printf "%s" $MASK > /proc/irq/$IRQ/smp_affinity
+       printf "%s %d %s -> /proc/irq/$IRQ/smp_affinity\n" $IFACE $core $MASK
+       if ! [ -z "$XPS_ENA" ]; then
+               printf "%s %d %s -> /sys/class/net/%s/queues/tx-%d/xps_cpus\n" $IFACE $core $MASK $IFACE $((n-1))
+               printf "%s" $MASK > /sys/class/net/$IFACE/queues/tx-$((n-1))/xps_cpus
+       fi
+}
+
+# Allow usage of , or -
+#
+parse_range () {
+        RANGE=${@//,/ }
+        RANGE=${RANGE//-/..}
+        LIST=""
+        for r in $RANGE; do
+               # eval lets us use vars in {#..#} range
+                [[ $r =~ '..' ]] && r="$(eval echo {$r})"
+               LIST+=" $r"
+        done
+       echo $LIST
+}
+
+# Affinitize interrupts
+#
+setaff()
+{
+       CORES=$(parse_range $CORES)
+       ncores=$(echo $CORES | wc -w)
+       n=1
+
+       # this script only supports interrupt vectors in pairs,
+       # modification would be required to support a single Tx or Rx queue
+       # per interrupt vector
+
+       queues="${IFACE}-.*TxRx"
+
+       irqs=$(grep "$queues" /proc/interrupts | cut -f1 -d:)
+       [ -z "$irqs" ] && irqs=$(grep $IFACE /proc/interrupts | cut -f1 -d:)
+       [ -z "$irqs" ] && irqs=$(for i in `ls -Ux /sys/class/net/$IFACE/device/msi_irqs` ;\
+                                do grep "$i:.*TxRx" /proc/interrupts | grep -v fdir | cut -f 1 -d : ;\
+                                done)
+       [ -z "$irqs" ] && echo "Error: Could not find interrupts for $IFACE"
+
+       echo "IFACE CORE MASK -> FILE"
+       echo "======================="
+       for IRQ in $irqs; do
+               [ "$n" -gt "$ncores" ] && n=1
+               j=1
+               # much faster than calling cut for each
+               for i in $CORES; do
+                       [ $((j++)) -ge $n ] && break
+               done
+               core=$i
+               set_affinity
+               ((n++))
+       done
+}
+
+# now the actual useful bits of code
+
+# these next 2 lines would allow script to auto-determine interfaces
+#[ -z "$IFACES" ] && IFACES=$(ls /sys/class/net)
+#[ -z "$IFACES" ] && echo "Error: No interfaces up" && exit 1
+
+# echo IFACES is $IFACES
+
+CORES=$(</sys/devices/system/cpu/online)
+[ "$CORES" ] || CORES=$(grep ^proc /proc/cpuinfo | cut -f2 -d:)
+
+# Core list for each node from sysfs
+node_dir=/sys/devices/system/node
+for i in $(ls -d $node_dir/node*); do
+       i=${i/*node/}
+       corelist[$i]=$(<$node_dir/node${i}/cpulist)
+done
+
+for IFACE in $IFACES; do
+       # echo $IFACE being modified
+
+       dev_dir=/sys/class/net/$IFACE/device
+       [ -e $dev_dir/numa_node ] && node=$(<$dev_dir/numa_node)
+       [ "$node" ] && [ "$node" -gt 0 ] || node=0
+
+       case "$AFF" in
+       local)
+               CORES=${corelist[$node]}
+       ;;
+       remote)
+               [ "$rnode" ] || { [ $node -eq 0 ] && rnode=1 || rnode=0; }
+               CORES=${corelist[$rnode]}
+       ;;
+       one)
+               [ -n "$cnt" ] || cnt=0
+               CORES=$cnt
+       ;;
+       all)
+               CORES=$CORES
+       ;;
+       custom)
+               echo -n "Input cores for $IFACE (ex. 0-7,15-23): "
+               read CORES
+       ;;
+       [0-9]*)
+               CORES=$AFF
+       ;;
+       *)
+               usage
+               exit 1
+       ;;
+       esac
+
+       # call the worker function
+       setaff
+done
+
+# check for irqbalance running
+IRQBALANCE_ON=`ps ax | grep -v grep | grep -q irqbalance; echo $?`
+if [ "$IRQBALANCE_ON" == "0" ] ; then
+       echo " WARNING: irqbalance is running and will"
+       echo "          likely override this script's affinitization."
+       echo "          Please stop the irqbalance service and/or execute"
+       echo "          'killall irqbalance'"
+fi
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/Makefile b/i40e-dkms-1.3.47/i40e-1.3.47/src/Makefile
new file mode 100644 (file)
index 0000000..1f20882
--- /dev/null
@@ -0,0 +1,281 @@
+###########################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Driver
+# Copyright(c) 2013 - 2015 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+###########################################################################
+# Environment tests
+
+ifeq (,$(BUILD_KERNEL))
+BUILD_KERNEL=$(shell uname -r)
+endif
+
+ifeq (,$(wildcard build.mk))
+       DRIVERS :=  $(shell ls -ld i40e* | awk '/^d/ { print $$9 }')
+else
+       DRIVERS :=  $(shell ls -ld i40e i40evf | awk '/^d/ { print $$9 }')
+endif
+DIRS :=  $(patsubst %,%/,$(DRIVERS))
+SOURCES := $(shell find $(DRIVERS) -name "*.[ch]" | grep -v "\.mod\.c")
+MODULES := $(patsubst %,%.ko,$(DRIVERS))
+TARGETS := $(join $(DIRS), $(MODULES))
+MANFILES := $(patsubst %,%.7,$(DRIVERS))
+MANFILES := $(patsubst %,../%,$(MANFILES))
+###########################################################################
+# Environment tests
+
+# Kernel Search Path
+# All the places we look for kernel source
+KSP :=  /lib/modules/$(BUILD_KERNEL)/build \
+        /lib/modules/$(BUILD_KERNEL)/source \
+        /usr/src/linux-$(BUILD_KERNEL) \
+        /usr/src/linux-$($(BUILD_KERNEL) | sed 's/-.*//') \
+        /usr/src/kernel-headers-$(BUILD_KERNEL) \
+        /usr/src/kernel-source-$(BUILD_KERNEL) \
+        /usr/src/linux-$($(BUILD_KERNEL) | sed 's/\([0-9]*\.[0-9]*\)\..*/\1/') \
+        /usr/src/linux
+
+# prune the list down to only values that exist
+# and have an include/config sub-directory
+# as of last check, everything beyond 2.6.32 should have include/config
+# even in the SLES12 /lib/modules/`uname -r`/build
+test_dir = $(shell [ -e $(dir)/include/config ] && echo $(dir))
+KSP := $(foreach dir, $(KSP), $(test_dir))
+
+# we will use this first valid entry in the search path
+ifeq (,$(KSRC))
+  KSRC := $(firstword $(KSP))
+endif
+
+ifeq (,$(KSRC))
+  $(warning *** Kernel header files not in any of the expected locations.)
+  $(warning *** Install the appropriate kernel development package, e.g.)
+  $(error kernel-devel, for building kernel modules and try again)
+else
+ifeq (/lib/modules/$(BUILD_KERNEL)/source, $(KSRC))
+  KOBJ :=  /lib/modules/$(BUILD_KERNEL)/build
+else
+  KOBJ :=  $(KSRC)
+endif
+endif
+
+# Version file Search Path
+VSP :=  $(KOBJ)/include/generated/utsrelease.h \
+        $(KOBJ)/include/linux/utsrelease.h \
+        $(KOBJ)/include/linux/version.h \
+        $(KOBJ)/include/generated/uapi/linux/version.h \
+        /boot/vmlinuz.version.h
+
+# Config file Search Path
+CSP :=  $(KOBJ)/include/generated/autoconf.h \
+        $(KOBJ)/include/linux/autoconf.h \
+        /boot/vmlinuz.autoconf.h
+
+# prune the lists down to only files that exist
+test_file = $(shell [ -f $(file) ] && echo $(file))
+VSP := $(foreach file, $(VSP), $(test_file))
+CSP := $(foreach file, $(CSP), $(test_file))
+
+# and use the first valid entry in the Search Paths
+ifeq (,$(VERSION_FILE))
+  VERSION_FILE := $(firstword $(VSP))
+endif
+ifeq (,$(CONFIG_FILE))
+  CONFIG_FILE := $(firstword $(CSP))
+endif
+
+ifeq (,$(wildcard $(VERSION_FILE)))
+  $(error Linux kernel source not configured - missing version header file)
+endif
+
+ifeq (,$(wildcard $(CONFIG_FILE)))
+  $(error Linux kernel source not configured - missing autoconf.h)
+endif
+
+# Some helper functions for converting kernel version to version codes
+get_kver = $(or $(word ${2},$(subst ., ,${1})),0)
+get_kvercode = $(shell [ "${1}" -ge 0 -a "${1}" -le 255 2>/dev/null ] && \
+                       [ "${2}" -ge 0 -a "${2}" -le 255 2>/dev/null ] && \
+                       [ "${3}" -ge 0 -a "${3}" -le 255 2>/dev/null ] && \
+                       printf %d $$(( ( ${1} << 16 ) + ( ${2} << 8 ) + ( ${3} ) )) )
+
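+# A worked example of the helpers above: for LINUX_VERSION=3.13.0, get_kver
+# returns 3, 13 and 0, and get_kvercode computes
+# (3 << 16) + (13 << 8) + 0 = 199936 -- the same encoding used by the
+# KERNEL_VERSION() macro in <linux/version.h>.
+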
+# Convert LINUX_VERSION into LINUX_VERSION_CODE
+ifneq (${LINUX_VERSION},)
+  LINUX_VERSION_CODE=$(call get_kvercode,$(call get_kver,${LINUX_VERSION},1),$(call get_kver,${LINUX_VERSION},2),$(call get_kver,${LINUX_VERSION},3))
+endif
+
+# Honor LINUX_VERSION_CODE
+ifneq (${LINUX_VERSION_CODE},)
+  $(info Setting LINUX_VERSION_CODE to ${LINUX_VERSION_CODE}$(if ${LINUX_VERSION}, from LINUX_VERSION=${LINUX_VERSION}))
+  KVER_CODE := ${LINUX_VERSION_CODE}
+  EXTRA_CFLAGS += -DLINUX_VERSION_CODE=${LINUX_VERSION_CODE}
+endif
+
+EXTRA_CFLAGS += $(CFLAGS_EXTRA)
+
+# get the kernel version - we use this to find the correct install path
+KVER := $(shell $(CC) $(EXTRA_CFLAGS) -E -dM $(VERSION_FILE) | grep UTS_RELEASE | \
+        awk '{ print $$3 }' | sed 's/\"//g')
+
+# assume source symlink is the same as build, otherwise adjust KOBJ
+ifneq (,$(wildcard /lib/modules/$(KVER)/build))
+ifneq ($(KSRC),$(shell readlink /lib/modules/$(KVER)/build))
+  KOBJ=/lib/modules/$(KVER)/build
+endif
+endif
+
+ifeq (${KVER_CODE},)
+  KVER_CODE := $(shell $(CC) $(EXTRA_CFLAGS) -E -dM $(VSP) 2> /dev/null |\
+                 grep -m 1 LINUX_VERSION_CODE | awk '{ print $$3 }' | sed 's/\"//g')
+endif
+
+# set the install path before and after 3.2.0, and handle
+# distros like SLES 11 that backported the directories
+ifeq (1,$(shell [ -d /lib/modules/$(KVER)/kernel/drivers/net/ethernet/intel ] && echo 1 || echo 0))
+INSTDIR := /lib/modules/$(KVER)/kernel/drivers/net/ethernet/intel
+else
+INSTDIR := /lib/modules/$(KVER)/kernel/drivers/net
+endif
+
+# abort the build on kernels older than 2.6.32
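+# (132640 is the version code for 2.6.32: (2 << 16) + (6 << 8) + 32)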
+ifneq (1,$(shell [ $(KVER_CODE) -ge 132640 ] && echo 1 || echo 0))
+  $(error *** Aborting the build. \
+          *** This driver is not supported on kernel versions older than 2.6.32)
+endif
+
+MANSECTION = 7
+
+ifeq (,$(MANDIR))
+  # find the best place to install the man page
+  MANPATH := $(shell (manpath 2>/dev/null || echo $$MANPATH) | sed 's/:/ /g')
+  ifneq (,$(MANPATH))
+    # test based on inclusion in MANPATH
+    test_dir = $(findstring $(dir), $(MANPATH))
+  else
+    # no MANPATH, test based on directory existence
+    test_dir = $(shell [ -e $(dir) ] && echo $(dir))
+  endif
+  # our preferred install path
+  # should /usr/local/man be in here?
+  MANDIR := /usr/share/man /usr/man
+  MANDIR := $(foreach dir, $(MANDIR), $(test_dir))
+  MANDIR := $(firstword $(MANDIR))
+endif
+ifeq (,$(MANDIR))
+  # fallback to /usr/man
+  MANDIR := /usr/man
+endif
+
+# kernel build function
+# $1 is the relative path of the subdir to build in
+# $2 is the kernel build target
+kernelbuild = $(shell (\
+               if [ -n "$(KOBJ)" ]; then \
+                       $(MAKE) ccflags-y:="$(CFLAGS_EXTRA)" -C $(KSRC) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=$(realpath $(1)) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) $(2) ; \
+               else \
+                       $(MAKE) ccflags-y:="$(CFLAGS_EXTRA)" -C $(KSRC) -O $(KOBJ) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=$(realpath $(1)) INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) $(2) ; \
+               fi > .tmp ; rm .tmp))
+
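+# Example use of the macro above (see the 'silent' and 'clean' targets
+# below): $(call kernelbuild,i40e,modules) runs kbuild against $(KSRC)
+# with SUBDIRS set to the absolute path of the i40e subdirectory.
+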
+
+###########################################################################
+# Build rules
+
+# We can't use the kernelbuild macro in verbose targets because it gobbles the
+# output of the shell.
+
+$(MODULES): $(TARGETS)
+       @cp $(TARGETS) .
+
+$(TARGETS): $(SOURCES)
+       @for s in $(DRIVERS) ; do \
+               if [ -n "$(KOBJ)" ]; then \
+                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s modules ; \
+               else \
+                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) -O $(KOBJ) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s modules ; \
+               fi ; \
+       done
+
+noisy: $(SOURCES)
+       @for s in $(DRIVERS) ; do \
+               if [ -n "$(KOBJ)" ]; then \
+                       $(MAKE) -C $(KSRC) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s V=1 modules ; \
+               else \
+                       $(MAKE) -C $(KSRC) -O $(KOBJ) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s V=1 modules ; \
+               fi ; \
+       done
+       @cp $(TARGETS) .
+
+silent: $(SOURCES)
+       $(foreach d, $(DRIVERS), $(call kernelbuild,$(d),modules))
+       @cp $(TARGETS) .
+
+sparse: clean $(SOURCES)
+       @for s in $(DRIVERS) ; do \
+               if [ -n "$(KOBJ)" ]; then \
+                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s C=2 CF="-D__CHECK_ENDIAN__ -Wbitwise -Wcontext" modules ; \
+               else \
+                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) -O $(KOBJ) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s C=2 CF="-D__CHECK_ENDIAN__ -Wbitwise -Wcontext" modules ; \
+               fi ; \
+       done
+
+ccc: clean $(SOURCES)
+       @for s in $(DRIVERS) ; do \
+               if [ -n "$(KOBJ)" ]; then \
+                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s coccicheck MODE=report; \
+               else \
+                       $(MAKE) ccflags-y+="$(CFLAGS_EXTRA)" -C $(KSRC) -O $(KOBJ) CONFIG_I40E=m CONFIG_I40EVF=m SUBDIRS=`pwd`/$$s coccicheck MODE=report; \
+               fi ; \
+       done
+
+manfile:
+       $(foreach m, $(DRIVERS), $(shell gzip -c ../$(m).$(MANSECTION) > $(m).$(MANSECTION).gz))
+
+clean:
+       $(foreach d, $(DRIVERS), $(call kernelbuild,$(d),clean))
+       @-rm -rf *.$(MANSECTION).gz *.ko
+
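+# Typical invocations (illustrative): 'make install' installs for the
+# running kernel; 'make INSTALL_MOD_PATH=/tmp/stage install' stages the
+# module and man page under an alternate root, e.g. for packaging.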
+install: $(MODULES) manfile
+# remove all old versions of the driver
+       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko))
+       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).gz))
+       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko.xz))
+       $(foreach m, $(DRIVERS), $(shell \
+               install -D -m 644 $(m).$(MANSECTION).gz $(INSTALL_MOD_PATH)$(MANDIR)/man$(MANSECTION)/$(m).$(MANSECTION).gz ; \
+               install -D -m 644 $(m).ko $(INSTALL_MOD_PATH)$(INSTDIR)/$(m)/$(m).ko))
+ifeq (,$(INSTALL_MOD_PATH))
+       @-/sbin/depmod -a $(KVER) || true
+else
+       @-/sbin/depmod -b $(INSTALL_MOD_PATH) -a -n $(KVER) > /dev/null || true
+endif
+
+uninstall:
+       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko))
+       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).gz))
+       $(foreach d, $(DRIVERS), $(shell rm -f $(INSTALL_MOD_PATH)$(INSTDIR)/$(d)/$(d).ko.xz))
+       @-/sbin/depmod -a $(KVER)
+       $(foreach m, $(DRIVERS), $(shell \
+               if [ -e $(INSTALL_MOD_PATH)$(MANDIR)/man$(MANSECTION)/$(m).$(MANSECTION).gz ] ; then \
+                       rm -f $(INSTALL_MOD_PATH)$(MANDIR)/man$(MANSECTION)/$(m).$(MANSECTION).gz ; \
+               fi))
+
+.PHONY: noisy clean manfile silent sparse ccc install uninstall
+.NOTPARALLEL:
+
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/Kbuild b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/Kbuild
new file mode 100644 (file)
index 0000000..fdcc55f
--- /dev/null
@@ -0,0 +1,51 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Driver
+# Copyright(c) 2013 - 2015 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
+#
+
+obj-m += i40e.o
+
+i40e-objs := i40e_main.o \
+       kcompat.o       \
+       i40e_ethtool.o  \
+       i40e_adminq.o   \
+       i40e_common.o   \
+       i40e_hmc.o      \
+       i40e_lan_hmc.o  \
+       i40e_nvm.o      \
+       i40e_configfs.o \
+       i40e_debugfs.o  \
+       i40e_diag.o     \
+       i40e_txrx.o     \
+       i40e_ptp.o      \
+       i40e_virtchnl_pf.o
+
+
+i40e-$(CONFIG_DCB) += i40e_dcb.o i40e_dcb_nl.o
+
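+# ':m=y' is a GNU make substitution reference: it rewrites a CONFIG_FCOE
+# value of 'm' to 'y', so i40e_fcoe.o is linked into i40e.ko whether FCoE
+# support is modular or built into the kernel.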
+i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/Module.supported b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/Module.supported
new file mode 100644 (file)
index 0000000..c0e31cf
--- /dev/null
@@ -0,0 +1 @@
+i40e.ko external
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e.h
new file mode 100644 (file)
index 0000000..46fa83b
--- /dev/null
@@ -0,0 +1,941 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_H_
+#define _I40E_H_
+
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#ifdef NETIF_F_TSO
+#include <net/checksum.h>
+#ifdef NETIF_F_TSO6
+#include <net/ip6_checksum.h>
+#endif
+#endif
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include "kcompat.h"
+#ifdef HAVE_IOMMU_PRESENT
+#include <linux/iommu.h>
+#endif
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+#ifdef HAVE_PTP_1588_CLOCK
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif /* HAVE_PTP_1588_CLOCK */
+#include "i40e_type.h"
+#include "i40e_prototype.h"
+#ifdef I40E_FCOE
+#include "i40e_fcoe.h"
+#endif
+#include "i40e_virtchnl.h"
+#include "i40e_virtchnl_pf.h"
+#include "i40e_txrx.h"
+#include "i40e_dcb.h"
+
+/* Useful i40e defaults */
+#define I40E_BASE_PF_SEID     16
+#define I40E_BASE_VSI_SEID    512
+#define I40E_BASE_VEB_SEID    288
+#define I40E_MAX_VEB          16
+
+#define I40E_MAX_NUM_DESCRIPTORS      4096
+#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
+#define I40E_DEFAULT_NUM_DESCRIPTORS  512
+#define I40E_REQ_DESCRIPTOR_MULTIPLE  32
+#define I40E_MIN_NUM_DESCRIPTORS      64
+#define I40E_MIN_MSIX                 2
+#define I40E_DEFAULT_NUM_VMDQ_VSI     8 /* max 256 VSIs */
+#define I40E_MIN_VSI_ALLOC            51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
+#define i40e_default_queues_per_vmdq(pf)  1 /* max 16 qps */
+#define I40E_DEFAULT_QUEUES_PER_VF    4
+#define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */
+#define i40e_pf_get_max_q_per_tc(pf)  64 /* should be a power of 2 */
+#define I40E_FDIR_RING                0
+#define I40E_FDIR_RING_COUNT          32
+#ifdef I40E_FCOE
+#define I40E_DEFAULT_FCOE             8 /* default number of QPs for FCoE */
+#define I40E_MINIMUM_FCOE             1 /* minimum number of QPs for FCoE */
+#endif /* I40E_FCOE */
+#define I40E_MAX_AQ_BUF_SIZE          4096
+#define I40E_AQ_LEN                   256
+#define I40E_AQ_WORK_LIMIT            66 /* max number of VFs + a little */
+#define I40E_MAX_USER_PRIORITY        8
+#define I40E_DEFAULT_MSG_ENABLE       4
+#define I40E_QUEUE_WAIT_RETRY_LIMIT   10
+#define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 16)
+
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+/* Ethtool Private Flags */
+#define I40E_PRIV_FLAGS_MFP_FLAG       BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG  BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR         BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS      BIT(3)
+#endif
+
+#define I40E_NVM_VERSION_LO_SHIFT  0
+#define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT  12
+#define I40E_NVM_VERSION_HI_MASK   (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_OEM_VER_BUILD_MASK    0xffff
+#define I40E_OEM_VER_PATCH_MASK    0xff
+#define I40E_OEM_VER_BUILD_SHIFT   8
+#define I40E_OEM_VER_SHIFT         24
+
+/* The values in here are decimal coded as hex, as is the case in the NVM map */
+#define I40E_CURRENT_NVM_VERSION_HI 0x2
+#define I40E_CURRENT_NVM_VERSION_LO 0x40
+
+/* magic for getting defines into strings */
+#define STRINGIFY(foo)  #foo
+#define XSTRINGIFY(bar) STRINGIFY(bar)
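+/* For example, given a hypothetical '#define DRV_MAJOR 1',
+ * STRINGIFY(DRV_MAJOR) yields "DRV_MAJOR", while XSTRINGIFY(DRV_MAJOR)
+ * expands the argument first and yields "1".
+ */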
+
+#define I40E_RX_DESC(R, i)                     \
+       ((ring_is_16byte_desc_enabled(R))       \
+               ? (union i40e_32byte_rx_desc *) \
+                       (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
+               : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+#define I40E_TX_DESC(R, i)                     \
+       (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i)                 \
+       (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define I40E_TX_FDIRDESC(R, i)                 \
+       (&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
+
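+/* Note that both arms of I40E_RX_DESC() return a union i40e_32byte_rx_desc
+ * pointer; the 16-byte arm indexes with a 16-byte stride and then casts,
+ * so callers always work with a single descriptor type.
+ */
+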
+/* default to trying for four seconds */
+#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
+
+/* driver state flags */
+enum i40e_state_t {
+       __I40E_TESTING,
+       __I40E_CONFIG_BUSY,
+       __I40E_CONFIG_DONE,
+       __I40E_DOWN,
+       __I40E_NEEDS_RESTART,
+       __I40E_SERVICE_SCHED,
+       __I40E_ADMINQ_EVENT_PENDING,
+       __I40E_MDD_EVENT_PENDING,
+       __I40E_VFLR_EVENT_PENDING,
+       __I40E_RESET_RECOVERY_PENDING,
+       __I40E_RESET_INTR_RECEIVED,
+       __I40E_REINIT_REQUESTED,
+       __I40E_PF_RESET_REQUESTED,
+       __I40E_CORE_RESET_REQUESTED,
+       __I40E_GLOBAL_RESET_REQUESTED,
+       __I40E_EMP_RESET_REQUESTED,
+       __I40E_EMP_RESET_INTR_RECEIVED,
+       __I40E_FILTER_OVERFLOW_PROMISC,
+       __I40E_SUSPENDED,
+       __I40E_BAD_EEPROM,
+       __I40E_DEBUG_MODE,
+       __I40E_DOWN_REQUESTED,
+       __I40E_FD_FLUSH_REQUESTED,
+       __I40E_RESET_FAILED,
+       __I40E_PORT_TX_SUSPENDED,
+       __I40E_VF_DISABLE,
+};
+
+enum i40e_interrupt_policy {
+       I40E_INTERRUPT_BEST_CASE,
+       I40E_INTERRUPT_MEDIUM,
+       I40E_INTERRUPT_LOWEST
+};
+
+struct i40e_lump_tracking {
+       u16 num_entries;
+       u16 search_hint;
+       u16 list[0];
+#define I40E_PILE_VALID_BIT  0x8000
+};
+
+#define I40E_DEFAULT_ATR_SAMPLE_RATE   20
+#define I40E_FDIR_MAX_RAW_PACKET_SIZE  512
+#define I40E_FDIR_BUFFER_FULL_MARGIN   10
+#define I40E_FDIR_BUFFER_HEAD_ROOM     32
+#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
+
+#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
+
+enum i40e_fd_stat_idx {
+       I40E_FD_STAT_ATR,
+       I40E_FD_STAT_SB,
+       I40E_FD_STAT_ATR_TUNNEL,
+       I40E_FD_STAT_PF_COUNT
+};
+#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
+#define I40E_FD_ATR_STAT_IDX(pf_id) \
+                       (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
+#define I40E_FD_SB_STAT_IDX(pf_id)  \
+                       (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+                       (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
+
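+/* Example: with I40E_FD_STAT_PF_COUNT == 3, PF 1's stat block starts at
+ * index 3, giving ATR = 3, SB = 4 and ATR_TUNNEL = 5.
+ */
+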
+struct i40e_fdir_filter {
+       struct hlist_node fdir_node;
+       /* filter input set */
+       u8 flow_type;
+       u8 ip4_proto;
+       /* TX packet view of src and dst */
+       __be32 dst_ip[4];
+       __be32 src_ip[4];
+       __be16 src_port;
+       __be16 dst_port;
+       __be32 sctp_v_tag;
+       /* filter control */
+       u16 q_index;
+       u8  flex_off;
+       u8  pctype;
+       u16 dest_vsi;
+       u8  dest_ctl;
+       u8  fd_status;
+       u16 cnt_index;
+       u32 fd_id;
+};
+
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+
+#define I40E_CLOUD_FIELD_OMAC  0x01
+#define I40E_CLOUD_FIELD_IMAC  0x02
+#define I40E_CLOUD_FIELD_IVLAN 0x04
+#define I40E_CLOUD_FIELD_TEN_ID        0x08
+#define I40E_CLOUD_FIELD_IIP   0x10
+
+#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC
+#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \
+                                            I40E_CLOUD_FIELD_IVLAN)
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
+                                                  I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC (I40E_CLOUD_FIELD_OMAC | \
+                                                  I40E_CLOUD_FIELD_IMAC | \
+                                                  I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
+                                                   I40E_CLOUD_FIELD_IVLAN | \
+                                                   I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_IIP  I40E_CLOUD_FIELD_IIP
+
+struct i40e_cloud_filter {
+       struct hlist_node cloud_node;
+       /* cloud filter input set follows */
+       u8 outer_mac[ETH_ALEN];
+       u8 inner_mac[ETH_ALEN];
+       __be16 inner_vlan;
+       __be32 inner_ip[4];
+       u32 tenant_id;
+       u8 flags;
+#define I40E_CLOUD_TNL_TYPE_XVLAN    1
+       u8 tunnel_type;
+       /* filter control */
+       u16 vsi_id; /* vsi number */
+       u16 queue_id;
+       u32 id;
+};
+
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+
+#define I40E_ETH_P_LLDP                        0x88cc
+
+#define I40E_DCB_PRIO_TYPE_STRICT      0
+#define I40E_DCB_PRIO_TYPE_ETS         1
+#define I40E_DCB_STRICT_PRIO_CREDITS   127
+#define I40E_MAX_USER_PRIORITY 8
+/* DCB per TC information data structure */
+struct i40e_tc_info {
+       u16     qoffset;        /* Queue offset from base queue */
+       u16     qcount;         /* Total Queues */
+       u8      netdev_tc;      /* Netdev TC index if netdev associated */
+};
+
+/* TC configuration data structure */
+struct i40e_tc_configuration {
+       u8      numtc;          /* Total number of enabled TCs */
+       u8      enabled_tc;     /* TC map */
+       struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* struct that defines the Ethernet device */
+struct i40e_pf {
+       struct pci_dev *pdev;
+       struct i40e_hw hw;
+       unsigned long state;
+       struct msix_entry *msix_entries;
+       bool fc_autoneg_status;
+
+       u16 eeprom_version;
+       u16 num_vmdq_vsis;         /* num vmdq vsis this PF has set up */
+       u16 num_vmdq_qps;          /* num queue pairs per vmdq pool */
+       u16 num_vmdq_msix;         /* num queue vectors per vmdq pool */
+       u16 num_req_vfs;           /* num VFs requested for this PF */
+       u16 num_vf_qps;            /* num queue pairs per VF */
+#ifdef I40E_FCOE
+       u16 num_fcoe_qps;          /* num fcoe queues this PF has set up */
+       u16 num_fcoe_msix;         /* num queue vectors per fcoe pool */
+#endif /* I40E_FCOE */
+       u16 num_lan_qps;           /* num lan queues this PF has set up */
+       u16 num_lan_msix;          /* num queue vectors for the base PF vsi */
+       int queues_left;           /* queues left unclaimed */
+       u16 rss_size;              /* num queues in the RSS array */
+       u16 rss_size_max;          /* HW defined max RSS queues */
+       u16 fdir_pf_filter_count;  /* num of guaranteed filters for this PF */
+       u16 num_alloc_vsi;         /* num VSIs this driver supports */
+       u8 atr_sample_rate;
+       bool wol_en;
+
+       struct hlist_head fdir_filter_list;
+       u16 fdir_pf_active_filters;
+       unsigned long fd_flush_timestamp;
+       u32 fd_flush_cnt;
+       u32 fd_add_err;
+       u32 fd_atr_cnt;
+       u32 fd_tcp_rule;
+
+       __be16  vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+       u16 pending_vxlan_bitmap;
+
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+       struct hlist_head cloud_filter_list;
+       u16 num_cloud_filters;
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+
+       enum i40e_interrupt_policy int_policy;
+       u16 rx_itr_default;
+       u16 tx_itr_default;
+       u32 msg_enable;
+       char int_name[I40E_INT_NAME_STR_LEN];
+       u16 adminq_work_limit; /* num of admin receive queue desc to process */
+       unsigned long service_timer_period;
+       unsigned long service_timer_previous;
+       struct timer_list service_timer;
+       struct work_struct service_task;
+
+       u64 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED              BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED                  BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED                 BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED              BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED                BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED                  BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED                 BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT         BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE             BIT_ULL(9)
+#ifdef I40E_FCOE
+#define I40E_FLAG_FCOE_ENABLED                 BIT_ULL(11)
+#endif /* I40E_FCOE */
+#define I40E_FLAG_IN_NETPOLL                   BIT_ULL(12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ                 BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC                  BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT            BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT           BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED                BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED                  BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED                BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED               BIT_ULL(22)
+#ifdef HAVE_PTP_1588_CLOCK
+#define I40E_FLAG_PTP                          BIT_ULL(25)
+#endif /* HAVE_PTP_1588_CLOCK */
+#define I40E_FLAG_MFP_ENABLED                  BIT_ULL(26)
+#define I40E_FLAG_VXLAN_FILTER_SYNC            BIT_ULL(27)
+#define I40E_FLAG_PORT_ID_VALID                BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE                  BIT_ULL(29)
+#define I40E_FLAG_VEB_STATS_ENABLED            BIT_ULL(37)
+#define I40E_FLAG_LINK_POLLING_ENABLED         BIT_ULL(39)
+#define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
+#define I40E_FLAG_NO_PCI_LINK_CHECK            BIT_ULL(41)
+
+       /* tracks features that get auto disabled by errors */
+       u64 auto_disable_flags;
+
+#ifdef I40E_FCOE
+       struct i40e_fcoe fcoe;
+
+#endif /* I40E_FCOE */
+       bool stat_offsets_loaded;
+       struct i40e_hw_port_stats stats;
+       struct i40e_hw_port_stats stats_offsets;
+       u32 tx_timeout_count;
+       u32 tx_timeout_recovery_level;
+       unsigned long tx_timeout_last_recovery;
+       u32 tx_sluggish_count;
+       u32 hw_csum_rx_error;
+       u32 led_status;
+       u16 corer_count; /* Core reset count */
+       u16 globr_count; /* Global reset count */
+       u16 empr_count; /* EMP reset count */
+       u16 pfr_count; /* PF reset count */
+       u16 sw_int_count; /* SW interrupt count */
+
+       struct mutex switch_mutex;
+       u16 lan_vsi;       /* our default LAN VSI */
+       u16 lan_veb;       /* initial relay, if exists */
+#define I40E_NO_VEB   0xffff
+#define I40E_NO_VSI   0xffff
+       u16 next_vsi;      /* Next unallocated VSI - 0-based! */
+       struct i40e_vsi **vsi;
+       struct i40e_veb *veb[I40E_MAX_VEB];
+
+       struct i40e_lump_tracking *qp_pile;
+       struct i40e_lump_tracking *irq_pile;
+
+       /* switch config info */
+       u16 pf_seid;
+       u16 main_vsi_seid;
+       u16 mac_seid;
+       struct kobject *switch_kobj;
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *i40e_dbg_pf;
+#endif /* CONFIG_DEBUG_FS */
+       bool cur_promisc;
+
+       u16 instance; /* A unique number per i40e_pf instance in the system */
+
+       /* sr-iov config info */
+       struct i40e_vf *vf;
+       int num_alloc_vfs;      /* actual number of VFs allocated */
+       u32 vf_aq_requests;
+
+       /* DCBx/DCBNL capability for PF that indicates
+        * whether DCBx is managed by firmware or host
+        * based agent (LLDPAD). Also, indicates what
+        * flavor of DCBx protocol (IEEE/CEE) is supported
+        * by the device. For now we're supporting IEEE
+        * mode only.
+        */
+       u16 dcbx_cap;
+
+       u32     fcoe_hmc_filt_num;
+       u32     fcoe_hmc_cntx_num;
+       struct i40e_filter_control_settings filter_settings;
+#ifdef HAVE_PTP_1588_CLOCK
+
+       struct ptp_clock *ptp_clock;
+       struct ptp_clock_info ptp_caps;
+       struct sk_buff *ptp_tx_skb;
+       struct hwtstamp_config tstamp_config;
+       unsigned long last_rx_ptp_check;
+       spinlock_t tmreg_lock; /* Used to protect the device time registers. */
+       u64 ptp_base_adj;
+       u32 rx_hwtstamp_cleared;
+       bool ptp_tx;
+       bool ptp_rx;
+#endif /* HAVE_PTP_1588_CLOCK */
+#ifdef I40E_ADD_PROBES
+       u64 tcp_segs;
+       u64 tx_tcp_cso;
+       u64 tx_udp_cso;
+       u64 tx_sctp_cso;
+       u64 tx_ip4_cso;
+       u64 rx_tcp_cso;
+       u64 rx_udp_cso;
+       u64 rx_sctp_cso;
+       u64 rx_ip4_cso;
+       u64 rx_tcp_cso_err;
+       u64 rx_udp_cso_err;
+       u64 rx_sctp_cso_err;
+       u64 rx_ip4_cso_err;
+#endif
+       u16 rss_table_size;
+       u32 max_bw;
+       u32 min_bw;
+
+       u32 ioremap_len;
+       u32 fd_inv;
+};
+
+struct i40e_mac_filter {
+       struct list_head list;
+       u8 macaddr[ETH_ALEN];
+#define I40E_VLAN_ANY -1
+       s16 vlan;
+       u8 counter;             /* number of instances of this filter */
+       bool is_vf;             /* filter belongs to a VF */
+       bool is_netdev;         /* filter belongs to a netdev */
+       bool changed;           /* filter needs to be sync'd to the HW */
+       bool is_laa;            /* filter is a Locally Administered Address */
+};
+
+struct i40e_veb {
+       struct i40e_pf *pf;
+       u16 idx;
+       u16 veb_idx;           /* index of VEB parent */
+       u16 seid;
+       u16 uplink_seid;
+       u16 stats_idx;           /* index of VEB statistics in HW */
+       u8  enabled_tc;
+       u16 bridge_mode;        /* Bridge Mode (VEB/VEPA) */
+       u16 flags;
+       u16 bw_limit;
+       u8  bw_max_quanta;
+       bool is_abs_credits;
+       u8  bw_tc_share_credits[I40E_MAX_TRAFFIC_CLASS];
+       u16 bw_tc_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+       u8  bw_tc_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+       struct kobject *kobj;
+       bool stat_offsets_loaded;
+       struct i40e_eth_stats stats;
+       struct i40e_eth_stats stats_offsets;
+       struct i40e_veb_tc_stats tc_stats;
+       struct i40e_veb_tc_stats tc_stats_offsets;
+};
+
+/* struct that defines a VSI, associated with a dev */
+struct i40e_vsi {
+       struct net_device *netdev;
+#ifdef HAVE_VLAN_RX_REGISTER
+       struct vlan_group *vlgrp;
+#else
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
+       bool netdev_registered;
+       bool stat_offsets_loaded;
+
+       u32 current_netdev_flags;
+       unsigned long state;
+#define I40E_VSI_FLAG_FILTER_CHANGED   BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER                BIT(1)
+       unsigned long flags;
+
+       /* Per VSI lock to protect elements/list (MAC filter) */
+       spinlock_t mac_filter_list_lock;
+       struct list_head mac_filter_list;
+
+       /* VSI stats */
+#ifdef HAVE_NDO_GET_STATS64
+       struct rtnl_link_stats64 net_stats;
+       struct rtnl_link_stats64 net_stats_offsets;
+#else
+       struct net_device_stats net_stats;
+       struct net_device_stats net_stats_offsets;
+#endif
+       struct i40e_eth_stats eth_stats;
+       struct i40e_eth_stats eth_stats_offsets;
+#ifdef I40E_FCOE
+       struct i40e_fcoe_stats fcoe_stats;
+       struct i40e_fcoe_stats fcoe_stats_offsets;
+       bool fcoe_stat_offsets_loaded;
+#endif
+       u32 tx_restart;
+       u32 tx_busy;
+       u64 tx_linearize;
+       u32 rx_buf_failed;
+       u32 rx_page_failed;
+
+       /* These are containers of ring pointers, allocated at run-time */
+       struct i40e_ring **rx_rings;
+       struct i40e_ring **tx_rings;
+
+       u16 work_limit;
+       /* high bit set means dynamic, use accessor routines to read/write.
+        * hardware only supports 2us resolution for the ITR registers.
+        * these values always store the USER setting, and must be converted
+        * before programming to a register.
+        */
+       u16 rx_itr_setting;
+       u16 tx_itr_setting;
+       u16 int_rate_limit;  /* value in usecs */
+
+       u16 rss_table_size;
+       u16 rss_size;
+
+       u16 max_frame;
+       u16 rx_hdr_len;
+       u16 rx_buf_len;
+       u8  dtype;
+
+       /* List of q_vectors allocated to this VSI */
+       struct i40e_q_vector **q_vectors;
+       int num_q_vectors;
+       int base_vector;
+       bool irqs_ready;
+
+       u16 seid;            /* HW index of this VSI (absolute index) */
+       u16 id;              /* VSI number */
+       u16 uplink_seid;
+
+       u16 base_queue;      /* vsi's first queue in hw array */
+       u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+       u16 req_queue_pairs; /* User requested queue pairs */
+       u16 num_queue_pairs; /* Used tx and rx pairs */
+       u16 num_desc;
+       enum i40e_vsi_type type;  /* VSI type, e.g., LAN, FCoE, etc */
+       u16 vf_id;              /* Virtual function ID for SRIOV VSIs */
+
+       struct i40e_tc_configuration tc_config;
+       struct i40e_aqc_vsi_properties_data info;
+
+       /* VSI BW limit (absolute across all TCs) */
+       u16 bw_limit;           /* VSI BW Limit (0 = disabled) */
+       u8  bw_max_quanta;      /* Max Quanta when BW limit is enabled */
+
+       /* Relative TC credits across VSIs */
+       u8  bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
+       /* TC BW limit credits within VSI */
+       u16  bw_ets_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+       /* TC BW limit max quanta within VSI */
+       u8  bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+
+       struct i40e_pf *back;  /* Backreference to associated PF */
+       u16 idx;               /* index in pf->vsi[] */
+       u16 veb_idx;           /* index of VEB parent */
+       struct kobject *kobj;  /* sysfs object */
+       bool current_isup;     /* Sync 'link up' logging */
+       bool block_tx_timeout;
+
+       /* VSI specific handlers */
+       irqreturn_t (*irq_handler)(int irq, void *data);
+#ifdef ETHTOOL_GRXRINGS
+
+       /* current rxnfc data */
+       struct ethtool_rxnfc rxnfc; /* current rss hash opts */
+#endif
+} ____cacheline_internodealigned_in_smp;
+
+struct i40e_netdev_priv {
+       struct i40e_vsi *vsi;
+};
+
+/* struct that defines an interrupt vector */
+struct i40e_q_vector {
+       struct i40e_vsi *vsi;
+
+       u16 v_idx;              /* index in the vsi->q_vector array. */
+       u16 reg_idx;            /* register index of the interrupt */
+
+       struct napi_struct napi;
+
+       struct i40e_ring_container rx;
+       struct i40e_ring_container tx;
+
+       u8 num_ringpairs;       /* total number of ring pairs in vector */
+
+#ifdef HAVE_IRQ_AFFINITY_HINT
+       cpumask_t affinity_mask;
+#endif
+       struct rcu_head rcu;    /* to avoid race with update stats on free */
+       char name[I40E_INT_NAME_STR_LEN];
+#define ITR_COUNTDOWN_START 100
+       u8 itr_countdown;       /* when 0 should adjust ITR */
+} ____cacheline_internodealigned_in_smp;
+
+/* lan device */
+struct i40e_device {
+       struct list_head list;
+       struct i40e_pf *pf;
+};
+
+/**
+ * i40e_nvm_version_str - format the NVM version strings
+ * @hw: ptr to the hardware info
+ **/
+static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
+{
+       static char buf[32];
+       u32 full_ver;
+       u8 ver, patch;
+       u16 build;
+
+       full_ver = hw->nvm.oem_ver;
+       ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+       build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT)
+                & I40E_OEM_VER_BUILD_MASK);
+       patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
+
+       snprintf(buf, sizeof(buf),
+                "%x.%02x 0x%x %d.%d.%d",
+                (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
+                       I40E_NVM_VERSION_HI_SHIFT,
+                (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
+                       I40E_NVM_VERSION_LO_SHIFT,
+                hw->nvm.eetrack, ver, build, patch);
+
+       return buf;
+}
+
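+/* Illustration: for hw->nvm.version == 0x2040 the first two fields above
+ * print as "2.40", per the decimal-coded-hex convention noted for
+ * I40E_CURRENT_NVM_VERSION_HI/LO. The returned buffer is static, so the
+ * string is only valid until the next call.
+ */
+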
+/**
+ * i40e_netdev_to_pf: Retrieve the PF struct for given netdev
+ * @netdev: the corresponding netdev
+ *
+ * Return the PF struct for the given netdev
+ **/
+static inline struct i40e_pf *i40e_netdev_to_pf(struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+
+       return vsi->back;
+}
+
+static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,
+                               irqreturn_t (*irq_handler)(int, void *))
+{
+       vsi->irq_handler = irq_handler;
+}
+
+/**
+ * i40e_rx_is_programming_status - check for programming status descriptor
+ * @qw: the first quad word of the program status descriptor
+ *
+ * The value in the descriptor length field indicates whether this is a
+ * programming status descriptor for flow director or FCoE: a length of
+ * I40E_RX_PROG_STATUS_DESC_LENGTH marks a programming status descriptor,
+ * otherwise it is a packet descriptor.
+ **/
+static inline bool i40e_rx_is_programming_status(u64 qw)
+{
+       return I40E_RX_PROG_STATUS_DESC_LENGTH ==
+               (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
+}
+
+/**
+ * i40e_get_fd_cnt_all - get the total FD filter space available
+ * @pf: pointer to the PF struct
+ **/
+static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+{
+       return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+}
+
+/* needed by i40e_ethtool.c */
+int i40e_up(struct i40e_vsi *vsi);
+void i40e_down(struct i40e_vsi *vsi);
+extern char i40e_driver_name[];
+extern const char i40e_driver_version_str[];
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
+void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_eth_stats(struct i40e_vsi *vsi);
+#ifdef HAVE_NDO_GET_STATS64
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
+#else
+struct net_device_stats *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
+#endif
+int i40e_fetch_switch_configuration(struct i40e_pf *pf,
+                                   bool printconfig);
+
+int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
+                            struct i40e_pf *pf, bool add);
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+                     struct i40e_fdir_filter *input, bool add);
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
+u32 i40e_get_current_fd_count(struct i40e_pf *pf);
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
+u32 i40e_get_global_fd_count(struct i40e_pf *pf);
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
+void i40e_set_ethtool_ops(struct net_device *netdev);
+struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
+                                        u8 *macaddr, s16 vlan,
+                                        bool is_vf, bool is_netdev);
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+                                       u8 *macaddr, s16 vlan,
+                                       bool is_vf, bool is_netdev);
+void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
+                    bool is_vf, bool is_netdev);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl);
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+                               u16 uplink, u32 param1);
+int i40e_vsi_release(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
+                                struct i40e_vsi *start_vsi);
+int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type);
+int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi);
+int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi);
+int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc);
+int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename);
+#ifdef I40E_FCOE
+void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+                             struct i40e_vsi_context *ctxt,
+                             u8 enabled_tc, bool is_add);
+#endif
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
+void i40e_quiesce_vsi(struct i40e_vsi *vsi);
+void i40e_unquiesce_vsi(struct i40e_vsi *vsi);
+void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf);
+void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf);
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
+                               u16 downlink_seid, u8 enabled_tc);
+void i40e_veb_release(struct i40e_veb *veb);
+
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
+i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+int i40e_add_del_cloud_filter(struct i40e_pf *pf,
+                                        struct i40e_cloud_filter *filter,
+                                        struct i40e_vsi *vsi,
+                                        bool add);
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
+void i40e_pf_reset_stats(struct i40e_pf *pf);
+#ifdef CONFIG_DEBUG_FS
+void i40e_dbg_pf_init(struct i40e_pf *pf);
+void i40e_dbg_pf_exit(struct i40e_pf *pf);
+void i40e_dbg_init(void);
+void i40e_dbg_exit(void);
+#else
+static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {}
+static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
+static inline void i40e_dbg_init(void) {}
+static inline void i40e_dbg_exit(void) {}
+#endif /* CONFIG_DEBUG_FS*/
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: HW interrupt vector to enable, relative to vsi->base_vector
+ **/
+static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+             (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+       wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val);
+       /* skip the flush */
+}
+
+void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
+#ifdef I40E_FCOE
+#ifdef HAVE_NDO_GET_STATS64
+struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+                                            struct net_device *netdev,
+                                            struct rtnl_link_stats64 *storage);
+#else
+struct net_device_stats *i40e_get_netdev_stats_struct(
+                                                    struct net_device *netdev);
+#endif
+int i40e_set_mac(struct net_device *netdev, void *p);
+void i40e_set_rx_mode(struct net_device *netdev);
+#endif
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+#ifdef I40E_FCOE
+void i40e_tx_timeout(struct net_device *netdev);
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+int i40e_vlan_rx_add_vid(struct net_device *netdev,
+                        __always_unused __be16 proto, u16 vid);
+int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+                         __always_unused __be16 proto, u16 vid);
+#else
+int i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+int i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+#endif
+#else
+void i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+void i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+#endif
+#endif
+int i40e_open(struct net_device *netdev);
+int i40e_vsi_open(struct i40e_vsi *vsi);
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
+int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+                                            bool is_vf, bool is_netdev);
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+                                     bool is_vf, bool is_netdev);
+#ifdef I40E_FCOE
+int i40e_close(struct net_device *netdev);
+int i40e_setup_tc(struct net_device *netdev, u8 tc);
+void i40e_netpoll(struct net_device *netdev);
+int i40e_fcoe_enable(struct net_device *netdev);
+int i40e_fcoe_disable(struct net_device *netdev);
+int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
+u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
+void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
+void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
+void i40e_init_pf_fcoe(struct i40e_pf *pf);
+int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
+void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
+int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
+                            union i40e_rx_desc *rx_desc,
+                            struct sk_buff *skb);
+void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
+                            union i40e_rx_desc *rx_desc, u8 prog_id);
+#endif /* I40E_FCOE */
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_IEEE
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+                          struct i40e_dcbx_config *old_cfg,
+                          struct i40e_dcbx_config *new_cfg);
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
+void i40e_dcbnl_setup(struct i40e_vsi *vsi);
+#endif /* HAVE_DCBNL_IEEE */
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+                           struct i40e_dcbx_config *old_cfg,
+                           struct i40e_dcbx_config *new_cfg);
+#endif /* CONFIG_DCB */
+#ifdef HAVE_PTP_1588_CLOCK
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi);
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
+void i40e_ptp_set_increment(struct i40e_pf *pf);
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_init(struct i40e_pf *pf);
+void i40e_ptp_stop(struct i40e_pf *pf);
+#endif /* HAVE_PTP_1588_CLOCK */
+u8 i40e_pf_get_num_tc(struct i40e_pf *pf);
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+int i40e_configfs_init(void);
+void i40e_configfs_exit(void);
+#endif /* CONFIG_CONFIGFS_FS */
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+#endif /* _I40E_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq.c
new file mode 100644 (file)
index 0000000..58e7ed5
--- /dev/null
@@ -0,0 +1,1080 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+       return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
+               desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
+}
+
+/**
+ *  i40e_adminq_init_regs - Initialize AdminQ registers
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+       /* set head and tail registers in our local struct */
+       if (i40e_is_vf(hw)) {
+               hw->aq.asq.tail = I40E_VF_ATQT1;
+               hw->aq.asq.head = I40E_VF_ATQH1;
+               hw->aq.asq.len  = I40E_VF_ATQLEN1;
+               hw->aq.asq.bal  = I40E_VF_ATQBAL1;
+               hw->aq.asq.bah  = I40E_VF_ATQBAH1;
+               hw->aq.arq.tail = I40E_VF_ARQT1;
+               hw->aq.arq.head = I40E_VF_ARQH1;
+               hw->aq.arq.len  = I40E_VF_ARQLEN1;
+               hw->aq.arq.bal  = I40E_VF_ARQBAL1;
+               hw->aq.arq.bah  = I40E_VF_ARQBAH1;
+       } else {
+               hw->aq.asq.tail = I40E_PF_ATQT;
+               hw->aq.asq.head = I40E_PF_ATQH;
+               hw->aq.asq.len  = I40E_PF_ATQLEN;
+               hw->aq.asq.bal  = I40E_PF_ATQBAL;
+               hw->aq.asq.bah  = I40E_PF_ATQBAH;
+               hw->aq.arq.tail = I40E_PF_ARQT;
+               hw->aq.arq.head = I40E_PF_ARQH;
+               hw->aq.arq.len  = I40E_PF_ARQLEN;
+               hw->aq.arq.bal  = I40E_PF_ARQBAL;
+               hw->aq.arq.bah  = I40E_PF_ARQBAH;
+       }
+}
+
+/**
+ *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ *  @hw: pointer to the hardware structure
+ **/
+i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+       i40e_status ret_code;
+
+       ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+                                        i40e_mem_atq_ring,
+                                        (hw->aq.num_asq_entries *
+                                        sizeof(struct i40e_aq_desc)),
+                                        I40E_ADMINQ_DESC_ALIGNMENT);
+       if (ret_code)
+               return ret_code;
+
+       ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+                                         (hw->aq.num_asq_entries *
+                                         sizeof(struct i40e_asq_cmd_details)));
+       if (ret_code) {
+               i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+               return ret_code;
+       }
+
+       return ret_code;
+}
+
+/**
+ *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ *  @hw: pointer to the hardware structure
+ **/
+i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+       i40e_status ret_code;
+
+       ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+                                        i40e_mem_arq_ring,
+                                        (hw->aq.num_arq_entries *
+                                        sizeof(struct i40e_aq_desc)),
+                                        I40E_ADMINQ_DESC_ALIGNMENT);
+
+       return ret_code;
+}
+
+/**
+ *  i40e_free_adminq_asq - Free Admin Queue send rings
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the posted send buffers have already been cleaned
+ *  and de-allocated
+ **/
+void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+       i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ *  i40e_free_adminq_arq - Free Admin Queue receive rings
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the posted receive buffers have already been cleaned
+ *  and de-allocated
+ **/
+void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+       i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+       i40e_status ret_code;
+       struct i40e_aq_desc *desc;
+       struct i40e_dma_mem *bi;
+       int i;
+
+       /* We'll be allocating the buffer info memory first, then we can
+        * allocate the mapped buffers for the event processing
+        */
+
+       /* buffer_info structures do not need alignment */
+       ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+               (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+       if (ret_code)
+               goto alloc_arq_bufs;
+       hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+       /* allocate the mapped buffers */
+       for (i = 0; i < hw->aq.num_arq_entries; i++) {
+               bi = &hw->aq.arq.r.arq_bi[i];
+               ret_code = i40e_allocate_dma_mem(hw, bi,
+                                                i40e_mem_arq_buf,
+                                                hw->aq.arq_buf_size,
+                                                I40E_ADMINQ_DESC_ALIGNMENT);
+               if (ret_code)
+                       goto unwind_alloc_arq_bufs;
+
+               /* now configure the descriptors for use */
+               desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+               desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+               if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+                       desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+               desc->opcode = 0;
+               /* This is in accordance with the admin queue design; there is
+                * no register for buffer size configuration
+                */
+               desc->datalen = CPU_TO_LE16((u16)bi->size);
+               desc->retval = 0;
+               desc->cookie_high = 0;
+               desc->cookie_low = 0;
+               desc->params.external.addr_high =
+                       CPU_TO_LE32(upper_32_bits(bi->pa));
+               desc->params.external.addr_low =
+                       CPU_TO_LE32(lower_32_bits(bi->pa));
+               desc->params.external.param0 = 0;
+               desc->params.external.param1 = 0;
+       }
+
+alloc_arq_bufs:
+       return ret_code;
+
+unwind_alloc_arq_bufs:
+       /* don't try to free the one that failed... */
+       i--;
+       for (; i >= 0; i--)
+               i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+       i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+       return ret_code;
+}
+
+/**
+ *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ *  @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+       i40e_status ret_code;
+       struct i40e_dma_mem *bi;
+       int i;
+
+       /* No mapped memory needed yet, just the buffer info structures */
+       ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+               (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+       if (ret_code)
+               goto alloc_asq_bufs;
+       hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+       /* allocate the mapped buffers */
+       for (i = 0; i < hw->aq.num_asq_entries; i++) {
+               bi = &hw->aq.asq.r.asq_bi[i];
+               ret_code = i40e_allocate_dma_mem(hw, bi,
+                                                i40e_mem_asq_buf,
+                                                hw->aq.asq_buf_size,
+                                                I40E_ADMINQ_DESC_ALIGNMENT);
+               if (ret_code)
+                       goto unwind_alloc_asq_bufs;
+       }
+alloc_asq_bufs:
+       return ret_code;
+
+unwind_alloc_asq_bufs:
+       /* don't try to free the one that failed... */
+       i--;
+       for (; i >= 0; i--)
+               i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+       i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+       return ret_code;
+}
+
+/**
+ *  i40e_free_arq_bufs - Free receive queue buffer info elements
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+       int i;
+
+       /* free descriptors */
+       for (i = 0; i < hw->aq.num_arq_entries; i++)
+               i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+       /* free the descriptor memory */
+       i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+       /* free the dma header */
+       i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ *  i40e_free_asq_bufs - Free send queue buffer info elements
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+       int i;
+
+       /* only unmap if the address is non-NULL */
+       for (i = 0; i < hw->aq.num_asq_entries; i++)
+               if (hw->aq.asq.r.asq_bi[i].pa)
+                       i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+       /* free the buffer info list */
+       i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+       /* free the descriptor memory */
+       i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+       /* free the dma header */
+       i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ *  i40e_config_asq_regs - configure ASQ registers
+ *  @hw: pointer to the hardware structure
+ *
+ *  Configure base address and length registers for the transmit queue
+ **/
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       u32 reg = 0;
+
+       /* Clear Head and Tail */
+       wr32(hw, hw->aq.asq.head, 0);
+       wr32(hw, hw->aq.asq.tail, 0);
+
+       /* set starting point */
+       wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+                                 I40E_PF_ATQLEN_ATQENABLE_MASK));
+       wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
+       wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+
+       /* Check one register to verify that config was applied */
+       reg = rd32(hw, hw->aq.asq.bal);
+       if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+               ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+       return ret_code;
+}
+
+/**
+ *  i40e_config_arq_regs - configure ARQ registers
+ *  @hw: pointer to the hardware structure
+ *
+ *  Configure base address and length registers for the receive (event) queue
+ **/
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       u32 reg = 0;
+
+       /* Clear Head and Tail */
+       wr32(hw, hw->aq.arq.head, 0);
+       wr32(hw, hw->aq.arq.tail, 0);
+
+       /* set starting point */
+       wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+                                 I40E_PF_ARQLEN_ARQENABLE_MASK));
+       wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
+       wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+
+       /* Update tail in the HW to post pre-allocated buffers */
+       wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+       /* Check one register to verify that config was applied */
+       reg = rd32(hw, hw->aq.arq.bal);
+       if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+               ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+       return ret_code;
+}
+
+/**
+ *  i40e_init_asq - main initialization routine for ASQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  This is the main initialization routine for the Admin Send Queue.
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.asq_buf_size
+ *
+ *  Do *NOT* hold the lock when calling this function, as the memory
+ *  allocation routines it calls are not atomic-context safe
+ **/
+i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+
+       if (hw->aq.asq.count > 0) {
+               /* queue already initialized */
+               ret_code = I40E_ERR_NOT_READY;
+               goto init_adminq_exit;
+       }
+
+       /* verify input for valid configuration */
+       if ((hw->aq.num_asq_entries == 0) ||
+           (hw->aq.asq_buf_size == 0)) {
+               ret_code = I40E_ERR_CONFIG;
+               goto init_adminq_exit;
+       }
+
+       hw->aq.asq.next_to_use = 0;
+       hw->aq.asq.next_to_clean = 0;
+       hw->aq.asq.count = hw->aq.num_asq_entries;
+
+       /* allocate the ring memory */
+       ret_code = i40e_alloc_adminq_asq_ring(hw);
+       if (ret_code != I40E_SUCCESS)
+               goto init_adminq_exit;
+
+       /* allocate buffers in the rings */
+       ret_code = i40e_alloc_asq_bufs(hw);
+       if (ret_code != I40E_SUCCESS)
+               goto init_adminq_free_rings;
+
+       /* initialize base registers */
+       ret_code = i40e_config_asq_regs(hw);
+       if (ret_code != I40E_SUCCESS)
+               goto init_adminq_free_rings;
+
+       /* success! */
+       goto init_adminq_exit;
+
+init_adminq_free_rings:
+       i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+       return ret_code;
+}
+
+/**
+ *  i40e_init_arq - initialize ARQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main initialization routine for the Admin Receive (Event) Queue.
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_arq_entries
+ *     - hw->aq.arq_buf_size
+ *
+ *  Do *NOT* hold the lock when calling this function, as the memory
+ *  allocation routines it calls are not atomic-context safe
+ **/
+i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+
+       if (hw->aq.arq.count > 0) {
+               /* queue already initialized */
+               ret_code = I40E_ERR_NOT_READY;
+               goto init_adminq_exit;
+       }
+
+       /* verify input for valid configuration */
+       if ((hw->aq.num_arq_entries == 0) ||
+           (hw->aq.arq_buf_size == 0)) {
+               ret_code = I40E_ERR_CONFIG;
+               goto init_adminq_exit;
+       }
+
+       hw->aq.arq.next_to_use = 0;
+       hw->aq.arq.next_to_clean = 0;
+       hw->aq.arq.count = hw->aq.num_arq_entries;
+
+       /* allocate the ring memory */
+       ret_code = i40e_alloc_adminq_arq_ring(hw);
+       if (ret_code != I40E_SUCCESS)
+               goto init_adminq_exit;
+
+       /* allocate buffers in the rings */
+       ret_code = i40e_alloc_arq_bufs(hw);
+       if (ret_code != I40E_SUCCESS)
+               goto init_adminq_free_rings;
+
+       /* initialize base registers */
+       ret_code = i40e_config_arq_regs(hw);
+       if (ret_code != I40E_SUCCESS)
+               goto init_adminq_free_rings;
+
+       /* success! */
+       goto init_adminq_exit;
+
+init_adminq_free_rings:
+       i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+       return ret_code;
+}
+
+/**
+ *  i40e_shutdown_asq - shutdown the ASQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main shutdown routine for the Admin Send Queue
+ **/
+i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+
+       i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+       if (hw->aq.asq.count == 0) {
+               ret_code = I40E_ERR_NOT_READY;
+               goto shutdown_asq_out;
+       }
+
+       /* Stop firmware AdminQ processing */
+       wr32(hw, hw->aq.asq.head, 0);
+       wr32(hw, hw->aq.asq.tail, 0);
+       wr32(hw, hw->aq.asq.len, 0);
+       wr32(hw, hw->aq.asq.bal, 0);
+       wr32(hw, hw->aq.asq.bah, 0);
+
+       hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+       /* free ring buffers */
+       i40e_free_asq_bufs(hw);
+
+shutdown_asq_out:
+       i40e_release_spinlock(&hw->aq.asq_spinlock);
+       return ret_code;
+}
+
+/**
+ *  i40e_shutdown_arq - shutdown ARQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main shutdown routine for the Admin Receive Queue
+ **/
+i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+
+       i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+       if (hw->aq.arq.count == 0) {
+               ret_code = I40E_ERR_NOT_READY;
+               goto shutdown_arq_out;
+       }
+
+       /* Stop firmware AdminQ processing */
+       wr32(hw, hw->aq.arq.head, 0);
+       wr32(hw, hw->aq.arq.tail, 0);
+       wr32(hw, hw->aq.arq.len, 0);
+       wr32(hw, hw->aq.arq.bal, 0);
+       wr32(hw, hw->aq.arq.bah, 0);
+
+       hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+       /* free ring buffers */
+       i40e_free_arq_bufs(hw);
+
+shutdown_arq_out:
+       i40e_release_spinlock(&hw->aq.arq_spinlock);
+       return ret_code;
+}
+
+/**
+ *  i40e_init_adminq - main initialization routine for Admin Queue
+ *  @hw: pointer to the hardware structure
+ *
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.num_arq_entries
+ *     - hw->aq.arq_buf_size
+ *     - hw->aq.asq_buf_size
+ **/
+i40e_status i40e_init_adminq(struct i40e_hw *hw)
+{
+       i40e_status ret_code;
+       u16 eetrack_lo, eetrack_hi;
+       u16 cfg_ptr, oem_hi, oem_lo;
+       int retry = 0;
+       /* verify input for valid configuration */
+       if ((hw->aq.num_arq_entries == 0) ||
+           (hw->aq.num_asq_entries == 0) ||
+           (hw->aq.arq_buf_size == 0) ||
+           (hw->aq.asq_buf_size == 0)) {
+               ret_code = I40E_ERR_CONFIG;
+               goto init_adminq_exit;
+       }
+
+       /* initialize spin locks */
+       i40e_init_spinlock(&hw->aq.asq_spinlock);
+       i40e_init_spinlock(&hw->aq.arq_spinlock);
+
+       /* Set up register offsets */
+       i40e_adminq_init_regs(hw);
+
+       /* setup ASQ command write back timeout */
+       hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
+       /* allocate the ASQ */
+       ret_code = i40e_init_asq(hw);
+       if (ret_code != I40E_SUCCESS)
+               goto init_adminq_destroy_spinlocks;
+
+       /* allocate the ARQ */
+       ret_code = i40e_init_arq(hw);
+       if (ret_code != I40E_SUCCESS)
+               goto init_adminq_free_asq;
+
+       /* There are some cases where the firmware may not be quite ready
+        * for AdminQ operations, so we retry the AdminQ setup a few times
+        * if we see timeouts in this first AQ call.
+        */
+       do {
+               ret_code = i40e_aq_get_firmware_version(hw,
+                                                       &hw->aq.fw_maj_ver,
+                                                       &hw->aq.fw_min_ver,
+                                                       &hw->aq.fw_build,
+                                                       &hw->aq.api_maj_ver,
+                                                       &hw->aq.api_min_ver,
+                                                       NULL);
+               if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+                       break;
+               retry++;
+               msleep(100);
+               i40e_resume_aq(hw);
+       } while (retry < 10);
+       if (ret_code != I40E_SUCCESS)
+               goto init_adminq_free_arq;
+
+       /* get the NVM version info */
+       i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+                          &hw->nvm.version);
+       i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+       i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+       hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+       i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+       i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+                          &oem_hi);
+       i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+                          &oem_lo);
+       hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+
+       if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+               ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+               goto init_adminq_free_arq;
+       }
+
+       /* pre-emptive resource lock release */
+       i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+       hw->aq.nvm_release_on_done = false;
+       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
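+       /* editor's note (not in the original commit): the HMC profile request
+        * below is best-effort; its return code is discarded by the
+        * assignment that follows
+        */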
+       ret_code = i40e_aq_set_hmc_resource_profile(hw,
+                                                   I40E_HMC_PROFILE_DEFAULT,
+                                                   0,
+                                                   NULL);
+       ret_code = I40E_SUCCESS;
+
+       /* success! */
+       goto init_adminq_exit;
+
+init_adminq_free_arq:
+       i40e_shutdown_arq(hw);
+init_adminq_free_asq:
+       i40e_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+       i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+       i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+       return ret_code;
+}
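+
+/* Editor's note: a minimal bring-up sketch, not part of this commit. A PF
+ * driver fills in the four sizing fields before calling i40e_init_adminq();
+ * the entry counts and buffer sizes below are illustrative values only, not
+ * anything mandated by this file.
+ */
+#if 0
+static i40e_status example_bring_up_adminq(struct i40e_hw *hw)
+{
+       hw->aq.num_asq_entries = 128;
+       hw->aq.num_arq_entries = 128;
+       hw->aq.asq_buf_size = 4096;
+       hw->aq.arq_buf_size = 4096;
+
+       /* returns I40E_ERR_CONFIG if any of the four fields is zero */
+       return i40e_init_adminq(hw);
+}
+#endif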
+
+/**
+ *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ *  @hw: pointer to the hardware structure
+ **/
+i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+
+       if (i40e_check_asq_alive(hw))
+               i40e_aq_queue_shutdown(hw, true);
+
+       i40e_shutdown_asq(hw);
+       i40e_shutdown_arq(hw);
+
+       /* destroy the spinlocks */
+       i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+       i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+       if (hw->nvm_buff.va)
+               i40e_free_virt_mem(hw, &hw->nvm_buff);
+
+       return ret_code;
+}
+
+/**
+ *  i40e_clean_asq - cleans Admin send queue
+ *  @hw: pointer to the hardware structure
+ *
+ *  returns the number of free desc
+ **/
+u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+       struct i40e_adminq_ring *asq = &(hw->aq.asq);
+       struct i40e_asq_cmd_details *details;
+       u16 ntc = asq->next_to_clean;
+       struct i40e_aq_desc desc_cb;
+       struct i40e_aq_desc *desc;
+
+       desc = I40E_ADMINQ_DESC(*asq, ntc);
+       details = I40E_ADMINQ_DETAILS(*asq, ntc);
+
+       while (rd32(hw, hw->aq.asq.head) != ntc) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+               if (details->callback) {
+                       I40E_ADMINQ_CALLBACK cb_func =
+                                       (I40E_ADMINQ_CALLBACK)details->callback;
+                       i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
+                                   I40E_DMA_TO_DMA);
+                       cb_func(hw, &desc_cb);
+               }
+               i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
+               i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
+               ntc++;
+               if (ntc == asq->count)
+                       ntc = 0;
+               desc = I40E_ADMINQ_DESC(*asq, ntc);
+               details = I40E_ADMINQ_DETAILS(*asq, ntc);
+       }
+
+       asq->next_to_clean = ntc;
+
+       return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ *  i40e_asq_done - check if FW has processed the Admin Send Queue
+ *  @hw: pointer to the hw struct
+ *
+ *  Returns true if the firmware has processed all descriptors on the
+ *  admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40e_asq_done(struct i40e_hw *hw)
+{
+       /* AQ designers suggest using the head register rather than the DD
+        * bit for better timing reliability
+        */
+       return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ *  i40e_asq_send_command - send command to Admin Queue
+ *  @hw: pointer to the hw struct
+ *  @desc: prefilled descriptor describing the command (non DMA mem)
+ *  @buff: buffer to use for indirect commands
+ *  @buff_size: size of buffer for indirect commands
+ *  @cmd_details: pointer to command details structure
+ *
+ *  This is the main send command driver routine for the Admin Queue send
+ *  queue.  It runs the queue, cleans the queue, etc.
+ **/
+i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+                               struct i40e_aq_desc *desc,
+                               void *buff, /* can be NULL */
+                               u16  buff_size,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       i40e_status status = I40E_SUCCESS;
+       struct i40e_dma_mem *dma_buff = NULL;
+       struct i40e_asq_cmd_details *details;
+       struct i40e_aq_desc *desc_on_ring;
+       bool cmd_completed = false;
+       u16  retval = 0;
+       u32  val = 0;
+
+       i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+       hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+       if (hw->aq.asq.count == 0) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: Admin queue not initialized.\n");
+               status = I40E_ERR_QUEUE_EMPTY;
+               goto asq_send_command_error;
+       }
+
+       val = rd32(hw, hw->aq.asq.head);
+       if (val >= hw->aq.num_asq_entries) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: head overrun at %d\n", val);
+               status = I40E_ERR_QUEUE_EMPTY;
+               goto asq_send_command_error;
+       }
+
+       details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+       if (cmd_details) {
+               i40e_memcpy(details,
+                           cmd_details,
+                           sizeof(struct i40e_asq_cmd_details),
+                           I40E_NONDMA_TO_NONDMA);
+
+               /* If the cmd_details are defined copy the cookie.  The
+                * CPU_TO_LE32 is not needed here because the data is ignored
+                * by the FW, only used by the driver
+                */
+               if (details->cookie) {
+                       desc->cookie_high =
+                               CPU_TO_LE32(upper_32_bits(details->cookie));
+                       desc->cookie_low =
+                               CPU_TO_LE32(lower_32_bits(details->cookie));
+               }
+       } else {
+               i40e_memset(details, 0,
+                           sizeof(struct i40e_asq_cmd_details),
+                           I40E_NONDMA_MEM);
+       }
+
+       /* clear requested flags and then set additional flags if defined */
+       desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+       desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+       if (buff_size > hw->aq.asq_buf_size) {
+               i40e_debug(hw,
+                          I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: Invalid buffer size: %d.\n",
+                          buff_size);
+               status = I40E_ERR_INVALID_SIZE;
+               goto asq_send_command_error;
+       }
+
+       if (details->postpone && !details->async) {
+               i40e_debug(hw,
+                          I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: Async flag not set along with postpone flag\n");
+               status = I40E_ERR_PARAM;
+               goto asq_send_command_error;
+       }
+
+       /* call clean and check queue available function to reclaim the
+        * descriptors that were processed by FW, the function returns the
+        * number of desc available
+        */
+       /* the clean function called here could be called in a separate thread
+        * in case of asynchronous completions
+        */
+       if (i40e_clean_asq(hw) == 0) {
+               i40e_debug(hw,
+                          I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: Error queue is full.\n");
+               status = I40E_ERR_ADMIN_QUEUE_FULL;
+               goto asq_send_command_error;
+       }
+
+       /* initialize the temp desc pointer with the right desc */
+       desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+       /* if the desc is available copy the temp desc to the right place */
+       i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
+                   I40E_NONDMA_TO_DMA);
+
+       /* if buff is not NULL assume indirect command */
+       if (buff != NULL) {
+               dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+               /* copy the user buff into the respective DMA buff */
+               i40e_memcpy(dma_buff->va, buff, buff_size,
+                           I40E_NONDMA_TO_DMA);
+               desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+               /* Update the address values in the desc with the pa value
+                * for respective buffer
+                */
+               desc_on_ring->params.external.addr_high =
+                               CPU_TO_LE32(upper_32_bits(dma_buff->pa));
+               desc_on_ring->params.external.addr_low =
+                               CPU_TO_LE32(lower_32_bits(dma_buff->pa));
+       }
+
+       /* bump the tail */
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+       i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+                     buff, buff_size);
+       (hw->aq.asq.next_to_use)++;
+       if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+               hw->aq.asq.next_to_use = 0;
+       if (!details->postpone)
+               wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+       /* if cmd_details are not defined or async flag is not set,
+        * we need to wait for desc write back
+        */
+       if (!details->async && !details->postpone) {
+               u32 total_delay = 0;
+
+               do {
+                       /* AQ designers suggest using the head register
+                        * rather than the DD bit for better timing
+                        * reliability
+                        */
+                       if (i40e_asq_done(hw))
+                               break;
+                       /* ugh! delay while spin_lock */
+                       usleep_range(1000, 2000);
+                       total_delay++;
+               } while (total_delay < hw->aq.asq_cmd_timeout);
+       }
+
+       /* if ready, copy the desc back to temp */
+       if (i40e_asq_done(hw)) {
+               i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
+                           I40E_DMA_TO_NONDMA);
+               if (buff != NULL)
+                       i40e_memcpy(buff, dma_buff->va, buff_size,
+                                   I40E_DMA_TO_NONDMA);
+               retval = LE16_TO_CPU(desc->retval);
+               if (retval != 0) {
+                       i40e_debug(hw,
+                                  I40E_DEBUG_AQ_MESSAGE,
+                                  "AQTX: Command completed with error 0x%X.\n",
+                                  retval);
+
+                       /* strip off FW internal code */
+                       retval &= 0xff;
+               }
+               cmd_completed = true;
+               if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+                       status = I40E_SUCCESS;
+               else
+                       status = I40E_ERR_ADMIN_QUEUE_ERROR;
+               hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+       }
+
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                  "AQTX: desc and buffer writeback:\n");
+       i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+       /* save writeback aq if requested */
+       if (details->wb_desc)
+               i40e_memcpy(details->wb_desc, desc_on_ring,
+                           sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
+
+       /* update the error if time out occurred */
+       if ((!cmd_completed) &&
+           (!details->async && !details->postpone)) {
+               i40e_debug(hw,
+                          I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: Writeback timeout.\n");
+               status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+       }
+
+asq_send_command_error:
+       i40e_release_spinlock(&hw->aq.asq_spinlock);
+       return status;
+}
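+
+/* Editor's note: a sketch of an asynchronous send, not part of this commit.
+ * With .async set, i40e_asq_send_command() returns without polling and the
+ * callback registered here is invoked later from i40e_clean_asq() with a
+ * copy of the written-back descriptor. Assumes hw, desc and status locals
+ * from a surrounding function; my_completion_cb is a hypothetical handler.
+ */
+#if 0
+       struct i40e_asq_cmd_details details = {0};
+
+       details.async = true;
+       details.callback = (void *)my_completion_cb; /* I40E_ADMINQ_CALLBACK */
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, &details);
+#endif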
+
+/**
+ *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ *  @desc:     pointer to the temp descriptor (non DMA mem)
+ *  @opcode:   the opcode can be used to decide which flags to turn off or on
+ *
+ *  Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+                                      u16 opcode)
+{
+       /* zero out the desc */
+       i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
+                   I40E_NONDMA_MEM);
+       desc->opcode = CPU_TO_LE16(opcode);
+       desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
+}
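+
+/* Editor's note: a sketch of a direct (no-buffer) command built with the
+ * helper above, not part of this commit. It mirrors the driver's
+ * i40e_aq_queue_shutdown() wrapper and uses the i40e_aqc_queue_shutdown
+ * layout from i40e_adminq_cmd.h. Assumes hw and status locals from a
+ * surrounding function.
+ */
+#if 0
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_queue_shutdown *cmd =
+               (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+       cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+#endif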
+
+/**
+ *  i40e_clean_arq_element - clean one Admin Receive Queue element
+ *  @hw: pointer to the hw struct
+ *  @e: event info from the receive descriptor, includes any buffers
+ *  @pending: number of events that could be left to process
+ *
+ *  This function cleans one Admin Receive Queue element and returns
+ *  the contents through e.  It can also return how many events are
+ *  left to process through 'pending'
+ **/
+i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+                                            struct i40e_arq_event_info *e,
+                                            u16 *pending)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       u16 ntc = hw->aq.arq.next_to_clean;
+       struct i40e_aq_desc *desc;
+       struct i40e_dma_mem *bi;
+       u16 desc_idx;
+       u16 datalen;
+       u16 flags;
+       u16 ntu;
+
+       /* take the lock before we start messing with the ring */
+       i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+       /* set next_to_use to head */
+       ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+       if (ntu == ntc) {
+               /* nothing to do - shouldn't need to update ring's values */
+               ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+               goto clean_arq_element_out;
+       }
+
+       /* now clean the next descriptor */
+       desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+       desc_idx = ntc;
+
+       flags = LE16_TO_CPU(desc->flags);
+       if (flags & I40E_AQ_FLAG_ERR) {
+               ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+               hw->aq.arq_last_status =
+                       (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
+               i40e_debug(hw,
+                          I40E_DEBUG_AQ_MESSAGE,
+                          "AQRX: Event received with error 0x%X.\n",
+                          hw->aq.arq_last_status);
+       }
+
+       i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
+                   I40E_DMA_TO_NONDMA);
+       datalen = LE16_TO_CPU(desc->datalen);
+       e->msg_len = min(datalen, e->buf_len);
+       if (e->msg_buf != NULL && (e->msg_len != 0))
+               i40e_memcpy(e->msg_buf,
+                           hw->aq.arq.r.arq_bi[desc_idx].va,
+                           e->msg_len, I40E_DMA_TO_NONDMA);
+
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+       i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+                     hw->aq.arq_buf_size);
+
+       /* Restore the original datalen and buffer address in the desc;
+        * FW updates datalen to indicate the event message size
+        */
+       bi = &hw->aq.arq.r.arq_bi[ntc];
+       i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
+
+       desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+       if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+               desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+       desc->datalen = CPU_TO_LE16((u16)bi->size);
+       desc->params.external.addr_high = CPU_TO_LE32(upper_32_bits(bi->pa));
+       desc->params.external.addr_low = CPU_TO_LE32(lower_32_bits(bi->pa));
+
+       /* set tail = the last cleaned desc index. */
+       wr32(hw, hw->aq.arq.tail, ntc);
+       /* ntc is updated to tail + 1 */
+       ntc++;
+       if (ntc == hw->aq.num_arq_entries)
+               ntc = 0;
+       hw->aq.arq.next_to_clean = ntc;
+       hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+       /* Set pending if needed, unlock and return */
+       if (pending != NULL)
+               *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+       i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+       if (i40e_is_nvm_update_op(&e->desc)) {
+               if (hw->aq.nvm_release_on_done) {
+                       i40e_release_nvm(hw);
+                       hw->aq.nvm_release_on_done = false;
+               }
+
+               switch (hw->nvmupd_state) {
+               case I40E_NVMUPD_STATE_INIT_WAIT:
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+                       break;
+
+               case I40E_NVMUPD_STATE_WRITE_WAIT:
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+                       break;
+
+               default:
+                       break;
+               }
+       }
+
+       return ret_code;
+}
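+
+/* Editor's note: a sketch of draining the ARQ from a service task, not part
+ * of this commit. The caller owns event.msg_buf; buf_len tells the routine
+ * above how much of each event payload it may copy out. Assumes a
+ * surrounding function with a struct i40e_hw *hw in scope.
+ */
+#if 0
+       struct i40e_arq_event_info event;
+       u16 pending;
+
+       event.buf_len = hw->aq.arq_buf_size;
+       event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+       if (!event.msg_buf)
+               return;
+       do {
+               if (i40e_clean_arq_element(hw, &event, &pending))
+                       break; /* I40E_ERR_ADMIN_QUEUE_NO_WORK: ring is empty */
+               /* dispatch on LE16_TO_CPU(event.desc.opcode) here */
+       } while (pending);
+       kfree(event.msg_buf);
+#endif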
+
+void i40e_resume_aq(struct i40e_hw *hw)
+{
+       /* Registers are reset after PF reset */
+       hw->aq.asq.next_to_use = 0;
+       hw->aq.asq.next_to_clean = 0;
+
+       i40e_config_asq_regs(hw);
+
+       hw->aq.arq.next_to_use = 0;
+       hw->aq.arq.next_to_clean = 0;
+
+       i40e_config_arq_regs(hw);
+}
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq.h
new file mode 100644 (file)
index 0000000..3a80aa0
--- /dev/null
@@ -0,0 +1,160 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_status.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i)   \
+       (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+       struct i40e_virt_mem dma_head;  /* space for dma structures */
+       struct i40e_dma_mem desc_buf;   /* descriptor ring memory */
+       struct i40e_virt_mem cmd_buf;   /* command buffer memory */
+
+       union {
+               struct i40e_dma_mem *asq_bi;
+               struct i40e_dma_mem *arq_bi;
+       } r;
+
+       u16 count;              /* Number of descriptors */
+       u16 rx_buf_len;         /* Admin Receive Queue buffer length */
+
+       /* used for interrupt processing */
+       u16 next_to_use;
+       u16 next_to_clean;
+
+       /* used for queue tracking */
+       u32 head;
+       u32 tail;
+       u32 len;
+       u32 bah;
+       u32 bal;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+       void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+       u64 cookie;
+       u16 flags_ena;
+       u16 flags_dis;
+       bool async;
+       bool postpone;
+       struct i40e_aq_desc *wb_desc;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i)   \
+       (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+       struct i40e_aq_desc desc;
+       u16 msg_len;
+       u16 buf_len;
+       u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+       struct i40e_adminq_ring arq;    /* receive queue */
+       struct i40e_adminq_ring asq;    /* send queue */
+       u32 asq_cmd_timeout;            /* send queue cmd write back timeout */
+       u16 num_arq_entries;            /* receive queue depth */
+       u16 num_asq_entries;            /* send queue depth */
+       u16 arq_buf_size;               /* receive queue buffer size */
+       u16 asq_buf_size;               /* send queue buffer size */
+       u16 fw_maj_ver;                 /* firmware major version */
+       u16 fw_min_ver;                 /* firmware minor version */
+       u32 fw_build;                   /* firmware build number */
+       u16 api_maj_ver;                /* api major version */
+       u16 api_min_ver;                /* api minor version */
+       bool nvm_release_on_done;
+
+       struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
+       struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
+
+       /* last status values on send and receive queues */
+       enum i40e_admin_queue_err asq_last_status;
+       enum i40e_admin_queue_err arq_last_status;
+};
+
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * @aq_ret: AdminQ handler error code; can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
+ **/
+static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+{
+       int aq_to_posix[] = {
+               0,           /* I40E_AQ_RC_OK */
+               -EPERM,      /* I40E_AQ_RC_EPERM */
+               -ENOENT,     /* I40E_AQ_RC_ENOENT */
+               -ESRCH,      /* I40E_AQ_RC_ESRCH */
+               -EINTR,      /* I40E_AQ_RC_EINTR */
+               -EIO,        /* I40E_AQ_RC_EIO */
+               -ENXIO,      /* I40E_AQ_RC_ENXIO */
+               -E2BIG,      /* I40E_AQ_RC_E2BIG */
+               -EAGAIN,     /* I40E_AQ_RC_EAGAIN */
+               -ENOMEM,     /* I40E_AQ_RC_ENOMEM */
+               -EACCES,     /* I40E_AQ_RC_EACCES */
+               -EFAULT,     /* I40E_AQ_RC_EFAULT */
+               -EBUSY,      /* I40E_AQ_RC_EBUSY */
+               -EEXIST,     /* I40E_AQ_RC_EEXIST */
+               -EINVAL,     /* I40E_AQ_RC_EINVAL */
+               -ENOTTY,     /* I40E_AQ_RC_ENOTTY */
+               -ENOSPC,     /* I40E_AQ_RC_ENOSPC */
+               -ENOSYS,     /* I40E_AQ_RC_ENOSYS */
+               -ERANGE,     /* I40E_AQ_RC_ERANGE */
+               -EPIPE,      /* I40E_AQ_RC_EFLUSHED */
+               -ESPIPE,     /* I40E_AQ_RC_BAD_ADDR */
+               -EROFS,      /* I40E_AQ_RC_EMODE */
+               -EFBIG,      /* I40E_AQ_RC_EFBIG */
+       };
+
+       /* aq_rc is invalid if AQ timed out */
+       if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+               return -EAGAIN;
+
+       if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
+               return -ERANGE;
+
+       return aq_to_posix[aq_rc];
+}
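+
+/* Editor's note: a usage sketch, not part of this commit. The typical
+ * pattern translates an AdminQ failure into an errno for user space;
+ * asq_last_status is captured by i40e_asq_send_command() when the
+ * descriptor writes back. Assumes hw, desc and status locals from a
+ * surrounding function.
+ */
+#if 0
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+       if (status)
+               return i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+#endif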
+
+/* general information */
+#define I40E_AQ_LARGE_BUF              512
+#define I40E_ASQ_CMD_TIMEOUT           250  /* msecs */
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+                                      u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq_cmd.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_adminq_cmd.h
new file mode 100644 (file)
index 0000000..aa4a8b2
--- /dev/null
@@ -0,0 +1,2362 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR      0x0001
+#define I40E_FW_API_VERSION_MINOR      0x0004
+
+struct i40e_aq_desc {
+       __le16 flags;
+       __le16 opcode;
+       __le16 datalen;
+       __le16 retval;
+       __le32 cookie_high;
+       __le32 cookie_low;
+       union {
+               struct {
+                       __le32 param0;
+                       __le32 param1;
+                       __le32 param2;
+                       __le32 param3;
+               } internal;
+               struct {
+                       __le32 param0;
+                       __le32 param1;
+                       __le32 addr_high;
+                       __le32 addr_low;
+               } external;
+               u8 raw[16];
+       } params;
+};
+
+/* Flags sub-structure
+ * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * *  RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define I40E_AQ_FLAG_DD_SHIFT  0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT  9
+#define I40E_AQ_FLAG_RD_SHIFT  10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT  13
+#define I40E_AQ_FLAG_EI_SHIFT  14
+#define I40E_AQ_FLAG_FE_SHIFT  15
+
+#define I40E_AQ_FLAG_DD                (1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP       (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR       (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE       (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB                (1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD                (1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC       (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF       (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI                (1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI                (1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE                (1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
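+
+/* Editor's note, a worked example: an indirect command whose buffer exceeds
+ * I40E_AQ_LARGE_BUF (512 bytes) carries both the buffer and large-buffer
+ * flags, I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_LB = 0x1000 | 0x0200 = 0x1200.
+ * The ARQ re-arm path in i40e_clean_arq_element() applies exactly this
+ * pattern when restoring a cleaned descriptor.
+ */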
+
+/* error codes */
+enum i40e_admin_queue_err {
+       I40E_AQ_RC_OK           = 0,  /* success */
+       I40E_AQ_RC_EPERM        = 1,  /* Operation not permitted */
+       I40E_AQ_RC_ENOENT       = 2,  /* No such element */
+       I40E_AQ_RC_ESRCH        = 3,  /* Bad opcode */
+       I40E_AQ_RC_EINTR        = 4,  /* operation interrupted */
+       I40E_AQ_RC_EIO          = 5,  /* I/O error */
+       I40E_AQ_RC_ENXIO        = 6,  /* No such resource */
+       I40E_AQ_RC_E2BIG        = 7,  /* Arg too long */
+       I40E_AQ_RC_EAGAIN       = 8,  /* Try again */
+       I40E_AQ_RC_ENOMEM       = 9,  /* Out of memory */
+       I40E_AQ_RC_EACCES       = 10, /* Permission denied */
+       I40E_AQ_RC_EFAULT       = 11, /* Bad address */
+       I40E_AQ_RC_EBUSY        = 12, /* Device or resource busy */
+       I40E_AQ_RC_EEXIST       = 13, /* object already exists */
+       I40E_AQ_RC_EINVAL       = 14, /* Invalid argument */
+       I40E_AQ_RC_ENOTTY       = 15, /* Not a typewriter */
+       I40E_AQ_RC_ENOSPC       = 16, /* No space left or alloc failure */
+       I40E_AQ_RC_ENOSYS       = 17, /* Function not implemented */
+       I40E_AQ_RC_ERANGE       = 18, /* Parameter out of range */
+       I40E_AQ_RC_EFLUSHED     = 19, /* Cmd flushed due to prev cmd error */
+       I40E_AQ_RC_BAD_ADDR     = 20, /* Descriptor contains a bad pointer */
+       I40E_AQ_RC_EMODE        = 21, /* Op not allowed in current dev mode */
+       I40E_AQ_RC_EFBIG        = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+       /* aq commands */
+       i40e_aqc_opc_get_version        = 0x0001,
+       i40e_aqc_opc_driver_version     = 0x0002,
+       i40e_aqc_opc_queue_shutdown     = 0x0003,
+       i40e_aqc_opc_set_pf_context     = 0x0004,
+
+       /* resource ownership */
+       i40e_aqc_opc_request_resource   = 0x0008,
+       i40e_aqc_opc_release_resource   = 0x0009,
+
+       i40e_aqc_opc_list_func_capabilities     = 0x000A,
+       i40e_aqc_opc_list_dev_capabilities      = 0x000B,
+
+       /* LAA */
+       i40e_aqc_opc_mac_address_read   = 0x0107,
+       i40e_aqc_opc_mac_address_write  = 0x0108,
+
+       /* PXE */
+       i40e_aqc_opc_clear_pxe_mode     = 0x0110,
+
+       /* internal switch commands */
+       i40e_aqc_opc_get_switch_config          = 0x0200,
+       i40e_aqc_opc_add_statistics             = 0x0201,
+       i40e_aqc_opc_remove_statistics          = 0x0202,
+       i40e_aqc_opc_set_port_parameters        = 0x0203,
+       i40e_aqc_opc_get_switch_resource_alloc  = 0x0204,
+
+       i40e_aqc_opc_add_vsi                    = 0x0210,
+       i40e_aqc_opc_update_vsi_parameters      = 0x0211,
+       i40e_aqc_opc_get_vsi_parameters         = 0x0212,
+
+       i40e_aqc_opc_add_pv                     = 0x0220,
+       i40e_aqc_opc_update_pv_parameters       = 0x0221,
+       i40e_aqc_opc_get_pv_parameters          = 0x0222,
+
+       i40e_aqc_opc_add_veb                    = 0x0230,
+       i40e_aqc_opc_update_veb_parameters      = 0x0231,
+       i40e_aqc_opc_get_veb_parameters         = 0x0232,
+
+       i40e_aqc_opc_delete_element             = 0x0243,
+
+       i40e_aqc_opc_add_macvlan                = 0x0250,
+       i40e_aqc_opc_remove_macvlan             = 0x0251,
+       i40e_aqc_opc_add_vlan                   = 0x0252,
+       i40e_aqc_opc_remove_vlan                = 0x0253,
+       i40e_aqc_opc_set_vsi_promiscuous_modes  = 0x0254,
+       i40e_aqc_opc_add_tag                    = 0x0255,
+       i40e_aqc_opc_remove_tag                 = 0x0256,
+       i40e_aqc_opc_add_multicast_etag         = 0x0257,
+       i40e_aqc_opc_remove_multicast_etag      = 0x0258,
+       i40e_aqc_opc_update_tag                 = 0x0259,
+       i40e_aqc_opc_add_control_packet_filter  = 0x025A,
+       i40e_aqc_opc_remove_control_packet_filter       = 0x025B,
+       i40e_aqc_opc_add_cloud_filters          = 0x025C,
+       i40e_aqc_opc_remove_cloud_filters       = 0x025D,
+
+       i40e_aqc_opc_add_mirror_rule    = 0x0260,
+       i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+       /* DCB commands */
+       i40e_aqc_opc_dcb_ignore_pfc     = 0x0301,
+       i40e_aqc_opc_dcb_updated        = 0x0302,
+
+       /* TX scheduler */
+       i40e_aqc_opc_configure_vsi_bw_limit             = 0x0400,
+       i40e_aqc_opc_configure_vsi_ets_sla_bw_limit     = 0x0406,
+       i40e_aqc_opc_configure_vsi_tc_bw                = 0x0407,
+       i40e_aqc_opc_query_vsi_bw_config                = 0x0408,
+       i40e_aqc_opc_query_vsi_ets_sla_config           = 0x040A,
+       i40e_aqc_opc_configure_switching_comp_bw_limit  = 0x0410,
+
+       i40e_aqc_opc_enable_switching_comp_ets                  = 0x0413,
+       i40e_aqc_opc_modify_switching_comp_ets                  = 0x0414,
+       i40e_aqc_opc_disable_switching_comp_ets                 = 0x0415,
+       i40e_aqc_opc_configure_switching_comp_ets_bw_limit      = 0x0416,
+       i40e_aqc_opc_configure_switching_comp_bw_config         = 0x0417,
+       i40e_aqc_opc_query_switching_comp_ets_config            = 0x0418,
+       i40e_aqc_opc_query_port_ets_config                      = 0x0419,
+       i40e_aqc_opc_query_switching_comp_bw_config             = 0x041A,
+       i40e_aqc_opc_suspend_port_tx                            = 0x041B,
+       i40e_aqc_opc_resume_port_tx                             = 0x041C,
+       i40e_aqc_opc_configure_partition_bw                     = 0x041D,
+
+       /* hmc */
+       i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+       i40e_aqc_opc_set_hmc_resource_profile   = 0x0501,
+
+       /* phy commands*/
+       i40e_aqc_opc_get_phy_abilities          = 0x0600,
+       i40e_aqc_opc_set_phy_config             = 0x0601,
+       i40e_aqc_opc_set_mac_config             = 0x0603,
+       i40e_aqc_opc_set_link_restart_an        = 0x0605,
+       i40e_aqc_opc_get_link_status            = 0x0607,
+       i40e_aqc_opc_set_phy_int_mask           = 0x0613,
+       i40e_aqc_opc_get_local_advt_reg         = 0x0614,
+       i40e_aqc_opc_set_local_advt_reg         = 0x0615,
+       i40e_aqc_opc_get_partner_advt           = 0x0616,
+       i40e_aqc_opc_set_lb_modes               = 0x0618,
+       i40e_aqc_opc_get_phy_wol_caps           = 0x0621,
+       i40e_aqc_opc_set_phy_debug              = 0x0622,
+       i40e_aqc_opc_upload_ext_phy_fm          = 0x0625,
+
+       /* NVM commands */
+       i40e_aqc_opc_nvm_read                   = 0x0701,
+       i40e_aqc_opc_nvm_erase                  = 0x0702,
+       i40e_aqc_opc_nvm_update                 = 0x0703,
+       i40e_aqc_opc_nvm_config_read            = 0x0704,
+       i40e_aqc_opc_nvm_config_write           = 0x0705,
+       i40e_aqc_opc_oem_post_update            = 0x0720,
+
+       /* virtualization commands */
+       i40e_aqc_opc_send_msg_to_pf             = 0x0801,
+       i40e_aqc_opc_send_msg_to_vf             = 0x0802,
+       i40e_aqc_opc_send_msg_to_peer           = 0x0803,
+
+       /* alternate structure */
+       i40e_aqc_opc_alternate_write            = 0x0900,
+       i40e_aqc_opc_alternate_write_indirect   = 0x0901,
+       i40e_aqc_opc_alternate_read             = 0x0902,
+       i40e_aqc_opc_alternate_read_indirect    = 0x0903,
+       i40e_aqc_opc_alternate_write_done       = 0x0904,
+       i40e_aqc_opc_alternate_set_mode         = 0x0905,
+       i40e_aqc_opc_alternate_clear_port       = 0x0906,
+
+       /* LLDP commands */
+       i40e_aqc_opc_lldp_get_mib       = 0x0A00,
+       i40e_aqc_opc_lldp_update_mib    = 0x0A01,
+       i40e_aqc_opc_lldp_add_tlv       = 0x0A02,
+       i40e_aqc_opc_lldp_update_tlv    = 0x0A03,
+       i40e_aqc_opc_lldp_delete_tlv    = 0x0A04,
+       i40e_aqc_opc_lldp_stop          = 0x0A05,
+       i40e_aqc_opc_lldp_start         = 0x0A06,
+       i40e_aqc_opc_get_cee_dcb_cfg    = 0x0A07,
+       i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
+       i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+
+       /* Tunnel commands */
+       i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
+       i40e_aqc_opc_del_udp_tunnel     = 0x0B01,
+
+       /* Async Events */
+       i40e_aqc_opc_event_lan_overflow         = 0x1001,
+
+       /* OEM commands */
+       i40e_aqc_opc_oem_parameter_change       = 0xFE00,
+       i40e_aqc_opc_oem_device_status_change   = 0xFE01,
+       i40e_aqc_opc_oem_ocsd_initialize        = 0xFE02,
+       i40e_aqc_opc_oem_ocbb_initialize        = 0xFE03,
+
+       /* debug commands */
+       i40e_aqc_opc_debug_read_reg             = 0xFF03,
+       i40e_aqc_opc_debug_write_reg            = 0xFF04,
+       i40e_aqc_opc_debug_modify_reg           = 0xFF07,
+       i40e_aqc_opc_debug_dump_internals       = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+       { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X)       I40E_CHECK_STRUCT_LEN(16, X)
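+
+/* Editor's note, a worked expansion of the checks used throughout this file:
+ * I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version) becomes
+ *   enum i40e_static_assert_enum_i40e_aqc_get_version {
+ *     i40e_static_assert_i40e_aqc_get_version =
+ *       (16)/((sizeof(struct i40e_aqc_get_version) == (16)) ? 1 : 0) };
+ * If the struct is not exactly 16 bytes the divisor is zero and the constant
+ * expression fails to compile; otherwise the enum is never referenced.
+ */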
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+       __le32 rom_ver;
+       __le32 fw_build;
+       __le16 fw_major;
+       __le16 fw_minor;
+       __le16 api_major;
+       __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+       u8      driver_major_ver;
+       u8      driver_minor_ver;
+       u8      driver_build_ver;
+       u8      driver_subbuild_ver;
+       u8      reserved[4];
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+       __le32  driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING       0x1
+       u8      reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+       u8      pf_id;
+       u8      reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM                   1
+#define I40E_AQ_RESOURCE_SDP                   2
+#define I40E_AQ_RESOURCE_ACCESS_READ           1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE          2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT      3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT     180000
+
+struct i40e_aqc_request_resource {
+       __le16  resource_id;
+       __le16  access_type;
+       __le32  timeout;
+       __le32  resource_number;
+       u8      reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+       u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN   1
+       u8 pf_index;
+       u8 reserved[2];
+       __le32 count;
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+       __le16  id;
+       u8      major_rev;
+       u8      minor_rev;
+       __le32  number;
+       __le32  logical_id;
+       __le32  phys_id;
+       u8      reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE     0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE                0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE     0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP      0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM   0x0006
+#define I40E_AQ_CAP_ID_SRIOV           0x0012
+#define I40E_AQ_CAP_ID_VF              0x0013
+#define I40E_AQ_CAP_ID_VMDQ            0x0014
+#define I40E_AQ_CAP_ID_8021QBG         0x0015
+#define I40E_AQ_CAP_ID_8021QBR         0x0016
+#define I40E_AQ_CAP_ID_VSI             0x0017
+#define I40E_AQ_CAP_ID_DCB             0x0018
+#define I40E_AQ_CAP_ID_FCOE            0x0021
+#define I40E_AQ_CAP_ID_ISCSI           0x0022
+#define I40E_AQ_CAP_ID_RSS             0x0040
+#define I40E_AQ_CAP_ID_RXQ             0x0041
+#define I40E_AQ_CAP_ID_TXQ             0x0042
+#define I40E_AQ_CAP_ID_MSIX            0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX         0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR   0x0045
+#define I40E_AQ_CAP_ID_1588            0x0046
+#define I40E_AQ_CAP_ID_IWARP           0x0051
+#define I40E_AQ_CAP_ID_LED             0x0061
+#define I40E_AQ_CAP_ID_SDP             0x0062
+#define I40E_AQ_CAP_ID_MDIO            0x0063
+#define I40E_AQ_CAP_ID_FLEX10          0x00F1
+#define I40E_AQ_CAP_ID_CEM             0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+       __le16  command_flags;
+#define I40E_AQ_CPPM_EN_LTRC   0x0800
+#define I40E_AQ_CPPM_EN_DMCTH  0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC   0x4000
+#define I40E_AQ_CPPM_EN_DMARC  0x8000
+       __le16  ttlx;
+       __le32  dmacr;
+       __le16  dmcth;
+       u8      hptc;
+       u8      reserved;
+       __le32  pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+       __le16  command_flags;
+#define I40E_AQ_ARP_INIT_IPV4  0x0008
+#define I40E_AQ_ARP_UNSUP_CTL  0x0010
+#define I40E_AQ_ARP_ENA                0x0020
+#define I40E_AQ_ARP_ADD_IPV4   0x0040
+#define I40E_AQ_ARP_DEL_IPV4   0x0080
+       __le16  table_id;
+       __le32  pfpm_proxyfc;
+       __le32  ip_addr;
+       u8      mac_addr[6];
+       u8      reserved[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+       __le16  table_idx_mac_addr_0;
+       __le16  table_idx_mac_addr_1;
+       __le16  table_idx_ipv6_0;
+       __le16  table_idx_ipv6_1;
+       __le16  control;
+#define I40E_AQ_NS_PROXY_ADD_0         0x0100
+#define I40E_AQ_NS_PROXY_DEL_0         0x0200
+#define I40E_AQ_NS_PROXY_ADD_1         0x0400
+#define I40E_AQ_NS_PROXY_DEL_1         0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0    0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0    0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1    0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1    0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ   0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL  0x0004
+       u8      mac_addr_0[6];
+       u8      mac_addr_1[6];
+       u8      local_mac_addr[6];
+       u8      ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+       u8      ipv6_addr_1[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+       __le16  command_flags;
+#define I40E_AQ_LAA_FLAG_WR    0x8000
+       u8      reserved[2];
+       __le32  sal;
+       __le16  sah;
+       u8      reserved2[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+       __le16  command_flags;
+#define I40E_AQC_LAN_ADDR_VALID                0x10
+#define I40E_AQC_SAN_ADDR_VALID                0x20
+#define I40E_AQC_PORT_ADDR_VALID       0x40
+#define I40E_AQC_WOL_ADDR_VALID                0x80
+#define I40E_AQC_MC_MAG_EN_VALID       0x100
+#define I40E_AQC_ADDR_VALID_MASK       0x1F0
+       u8      reserved[6];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+       u8 pf_lan_mac[6];
+       u8 pf_san_mac[6];
+       u8 port_mac[6];
+       u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+       __le16  command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY   0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL    0x4000
+#define I40E_AQC_WRITE_TYPE_PORT       0x8000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG      0xC000
+#define I40E_AQC_WRITE_TYPE_MASK       0xC000
+
+       __le16  mac_sah;
+       __le32  mac_sal;
+       u8      reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response  (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+       u8      rx_cnt;
+       u8      reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an SEID and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+       __le16  seid;
+       u8      reserved[6];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+       __le16  num_reported;
+       __le16  num_total;
+       u8      reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
+struct i40e_aqc_switch_config_element_resp {
+       u8      element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC       1
+#define I40E_AQ_SW_ELEM_TYPE_PF                2
+#define I40E_AQ_SW_ELEM_TYPE_VF                3
+#define I40E_AQ_SW_ELEM_TYPE_EMP       4
+#define I40E_AQ_SW_ELEM_TYPE_BMC       5
+#define I40E_AQ_SW_ELEM_TYPE_PV                16
+#define I40E_AQ_SW_ELEM_TYPE_VEB       17
+#define I40E_AQ_SW_ELEM_TYPE_PA                18
+#define I40E_AQ_SW_ELEM_TYPE_VSI       19
+       u8      revision;
+#define I40E_AQ_SW_ELEM_REV_1          1
+       __le16  seid;
+       __le16  uplink_seid;
+       __le16  downlink_seid;
+       u8      reserved[3];
+       u8      connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR      0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT      0x2
+#define I40E_AQ_CONN_TYPE_CASCADED     0x3
+       __le16  scheduler_id;
+       __le16  element_info;
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
+/* Get Switch Configuration (indirect 0x0200)
+ *    an array of elements is returned in the response buffer
+ *    the first in the array is the header, remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+       struct i40e_aqc_get_switch_config_header_resp   header;
+       struct i40e_aqc_switch_config_element_resp      element[1];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
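+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * walking the element array returned in the indirect buffer; num_reported
+ * elements follow the header, as the comment above describes.
+ */
+static inline void i40e_example_walk_switch_config(struct i40e_aqc_get_switch_config_resp *resp)
+{
+       u16 i, n = le16_to_cpu(resp->header.num_reported);
+
+       for (i = 0; i < n; i++) {
+               struct i40e_aqc_switch_config_element_resp *el = &resp->element[i];
+
+               if (el->element_type == I40E_AQ_SW_ELEM_TYPE_VSI)
+                       (void)le16_to_cpu(el->seid); /* e.g. record the VSI SEID */
+       }
+}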
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+       __le16  seid;
+       __le16  vlan;
+       __le16  stat_index;
+       u8      reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+       __le16  command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS  1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA   4
+       __le16  bad_frame_vsi;
+       __le16  default_seid;        /* reserved for command */
+       u8      reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+       u8      num_entries;         /* reserved for command */
+       u8      reserved[7];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+       u8      resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB              0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI              0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR          0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG             0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG             0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH   0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH     0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN             0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY   0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY  0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL   0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE      0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS       0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS     0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS        0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS       0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS      0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS         0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS     0x13
+       u8      reserved1;
+       __le16  guaranteed;
+       __le16  total;
+       __le16  used;
+       __le16  total_unalloced;
+       u8      reserved2[6];
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
+/* Add VSI (indirect 0x0210)
+ *    this indirect command uses struct i40e_aqc_vsi_properties_data
+ *    as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211)
+ *     uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ *     uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+       __le16  uplink_seid;
+       u8      connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL   0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT  0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+       u8      reserved1;
+       u8      vf_id;
+       u8      reserved2;
+       __le16  vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT         0x0
+#define I40E_AQ_VSI_TYPE_MASK          (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF            0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2         0x1
+#define I40E_AQ_VSI_TYPE_PF            0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG       0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV   0x4
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+       __le16 seid;
+       __le16 vsi_number;
+       __le16 vsi_used;
+       __le16 vsi_free;
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+       /* first 96 bytes are written by SW */
+       __le16  valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID          0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID                0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID            0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID          0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID      0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID       0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID       0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID       0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID                0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID           0x0200
+       /* switch section */
+       __le16  switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT                0x0000
+#define I40E_AQ_VSI_SW_ID_MASK         (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG        0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB        0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB        0x4000
+       u8      sw_reserved[2];
+       /* security section */
+       u8      sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD   0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK   0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK    0x04
+       u8      sec_reserved;
+       /* VLAN section */
+       __le16  pvid; /* VLANS include priority bits */
+       __le16  fcoe_pvid;
+       u8      port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT   0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK    (0x03 << \
+                                        I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED  0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED        0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL     0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID  0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT   0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK    (0x3 << \
+                                        I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH        0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP  0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR     0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+       u8      pvlan_reserved[3];
+       /* ingress egress up sections */
+       __le32  ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK  (0x7 << \
+                                        I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK  (0x7 << \
+                                        I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK  (0x7 << \
+                                        I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK  (0x7 << \
+                                        I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK  (0x7 << \
+                                        I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK  (0x7 << \
+                                        I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK  (0x7 << \
+                                        I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK  (0x7 << \
+                                        I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+       __le32  egress_table;   /* same defines as for ingress table */
+       /* cascaded PV section */
+       __le16  cas_pv_tag;
+       u8      cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT          0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK           (0x03 << \
+                                                I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE          0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE         0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY           0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG          0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE          0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG     0x40
+       u8      cas_pv_reserved;
+       /* queue mapping section */
+       __le16  mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG     0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG  0x1
+       __le16  queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT                0x0
+#define I40E_AQ_VSI_QUEUE_MASK         (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+       __le16  tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT        0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+                                        I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT        9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+                                        I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+       /* queueing option section */
+       u8      queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA    0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA   0x20
+       u8      queueing_opt_reserved[3];
+       /* scheduler section */
+       u8      up_enable_bits;
+       u8      sched_reserved;
+       /* outer up section */
+       __le32  outer_up_table; /* same structure and defines as ingress table */
+       u8      cmd_reserved[8];
+       /* last 32 bytes are written by FW */
+       __le16  qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID  0xFFFF
+       __le16  stat_counter_idx;
+       __le16  sched_id;
+       u8      resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
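+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * composing one tc_mapping word from the shift/mask macros above, assuming
+ * the NUMBER field encodes the queue count as a power-of-two exponent.
+ */
+static inline __le16 i40e_example_tc_map(u16 offset, u8 pow2_qcount)
+{
+       u16 qmap = ((offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
+                   I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
+                  (((u16)pow2_qcount << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
+                   I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
+
+       return cpu_to_le16(qmap);
+}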
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+       __le16  command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE               0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN   0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN   0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT          0x8
+       __le16  uplink_seid;
+       __le16  connected_seid;
+       u8      reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+       /* reserved for update; for add also encodes error if rc == ENOSPC */
+       __le16  pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV     0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED  0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER        0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY  0x8
+       u8      reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+       __le16  seid;
+       __le16  default_stag;
+       __le16  pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE                        0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG      0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG      0x4
+       u8      reserved[8];
+       __le16  default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+       __le16  uplink_seid;
+       __le16  downlink_seid;
+       __le16  veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING              0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT       1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK                (0x3 << \
+                                       I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT     0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA                0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER      0x8
+       u8      enable_tcs;
+       u8      reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+       u8      reserved[6];
+       __le16  switch_seid;
+       /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+       __le16  veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB           0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED         0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER       0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY         0x8
+       __le16  statistic_index;
+       __le16  vebs_used;
+       __le16  vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+       __le16  seid;
+       __le16  switch_id;
+       __le16  veb_flags; /* only the first/last flags from 0x0230 are valid */
+       __le16  statistic_index;
+       __le16  vebs_used;
+       __le16  vebs_free;
+       u8      reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used as the command descriptor for most VLAN commands */
+struct i40e_aqc_macvlan {
+       __le16  num_addresses;
+       __le16  seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT    0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK     (0x3FF << \
+                                       I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID                0x8000
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+       u8      mac_addr[6];
+       __le16  vlan_tag;
+       __le16  flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH     0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH                0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN       0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE          0x0008
+       __le16  queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT       0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK                (0x7FF << \
+                                       I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+       /* response section */
+       u8      match_method;
+#define I40E_AQC_MM_PERFECT_MATCH      0x01
+#define I40E_AQC_MM_HASH_MATCH         0x02
+#define I40E_AQC_MM_ERR_NO_RES         0xFF
+       u8      reserved1[3];
+};
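+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * filling one add-MAC/VLAN element for a perfect-match filter that ignores
+ * the VLAN tag, per the flag definitions above.
+ */
+static inline void i40e_example_fill_macvlan(struct i40e_aqc_add_macvlan_element_data *e,
+                                            const u8 mac[6])
+{
+       int i;
+
+       for (i = 0; i < 6; i++)
+               e->mac_addr[i] = mac[i];
+       e->vlan_tag = 0;
+       e->queue_number = 0;
+       e->flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+                              I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+}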
+
+struct i40e_aqc_add_remove_macvlan_completion {
+       __le16 perfect_mac_used;
+       __le16 perfect_mac_free;
+       __le16 unicast_hash_free;
+       __le16 multicast_hash_free;
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+       u8      mac_addr[6];
+       __le16  vlan_tag;
+       u8      flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH     0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH                0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN       0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS          0x10
+       u8      reserved[3];
+       /* reply section */
+       u8      error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS                0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL           0xFF
+       u8      reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+       __le16  vlan_tag;
+       u8      vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL                        0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT          1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK   (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR                0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY                0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY      0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT              3
+#define I40E_AQC_VLAN_PTYPE_MASK       (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI                0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI                0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI      0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI       0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL       0x1
+       u8      reserved;
+       u8      result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS      0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE        0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS   0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL      0xFF
+       u8      reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+       u8      reserved[4];
+       __le16  vlans_used;
+       __le16  vlans_free;
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+       __le16  promiscuous_flags;
+       __le16  valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST       0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST     0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST     0x04
+#define I40E_AQC_SET_VSI_DEFAULT               0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN          0x10
+       __le16  seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK                0x3FF
+       __le16  vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK             0x0FFF
+#define I40E_AQC_SET_VSI_VLAN_VALID            0x8000
+       u8      reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
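+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * enabling unicast promiscuous mode; the same bit must be set in both
+ * promiscuous_flags (the new value) and valid_flags (which bits to apply).
+ */
+static inline void i40e_example_set_uc_promisc(struct i40e_aqc_set_vsi_promiscuous_modes *cmd,
+                                              u16 seid)
+{
+       cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+       cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+       cmd->seid = cpu_to_le16(seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
+}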
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+       __le16  flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE         0x0001
+       __le16  seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT    0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK     (0x3FF << \
+                                       I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+       __le16  tag;
+       __le16  queue_number;
+       u8      reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+       u8      reserved[12];
+       __le16  tags_used;
+       __le16  tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+       __le16  seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK  (0x3FF << \
+                                       I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+       __le16  tag;
+       u8      reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+       __le16  pv_seid;
+       __le16  etag;
+       u8      num_unicast_etags;
+       u8      reserved[3];
+       __le32  addr_high;          /* address of array of 2-byte s-tags */
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+       u8      reserved[4];
+       __le16  mcast_etags_used;
+       __le16  mcast_etags_free;
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+       __le16  seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK  (0x3FF << \
+                                       I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+       __le16  old_tag;
+       __le16  new_tag;
+       u8      reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+       u8      reserved[12];
+       __le16  tags_used;
+       __le16  tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter struct below,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+       u8      mac[6];
+       __le16  etype;
+       __le16  flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC   0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP         0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE     0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX           0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX           0x0000
+       __le16  seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK  (0x3FF << \
+                               I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+       __le16  queue;
+       u8      reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+       __le16  mac_etype_used;
+       __le16  etype_used;
+       __le16  mac_etype_free;
+       __le16  etype_free;
+       u8      reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+       u8      num_filters;
+       u8      reserved;
+       __le16  seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT  0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK   (0x3FF << \
+                                       I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+       u8      reserved2[4];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+       u8      outer_mac[6];
+       u8      inner_mac[6];
+       __le16  inner_vlan;
+       union {
+               struct {
+                       u8 reserved[12];
+                       u8 data[4];
+               } v4;
+               struct {
+                       u8 data[16];
+               } v6;
+       } ipaddr;
+       __le16  flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT                        0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+                                       I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP                  0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN           0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID    0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID          0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC                 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC                 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC     0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP                  0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE              0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT                   6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK                    0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4                  0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6                  0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT              9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK               0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN              0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC         1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                        2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                 3
+
+       __le32  tenant_id;
+       u8      reserved[4];
+       __le16  queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT         0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK          (0x7FF << \
+                                                I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+       u8      reserved2[14];
+       /* response section */
+       u8      allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS      0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL         0xFF
+       u8      response_reserved[7];
+};
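+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * composing the flags word for an inner-MAC+VLAN filter on a VXLAN (XVLAN)
+ * tunnel over IPv4, from the filter, IP-version and tunnel-type fields above.
+ */
+static inline __le16 i40e_example_cloud_filter_flags(void)
+{
+       u16 flags = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN |
+                   I40E_AQC_ADD_CLOUD_FLAGS_IPV4 |
+                   (I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN <<
+                    I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
+
+       return cpu_to_le16(flags);
+}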
+
+struct i40e_aqc_remove_cloud_filters_completion {
+       __le16 perfect_ovlan_used;
+       __le16 perfect_ovlan_free;
+       __le16 vlan_used;
+       __le16 vlan_free;
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ *       take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+       __le16 seid;
+       __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT                0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK         (0x7 << \
+                                               I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS        1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN         3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS  4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS   5
+       __le16 num_entries;
+       __le16 destination;  /* VSI for add, rule id for delete */
+       __le32 addr_high;    /* address of array of 2-byte VSI or VLAN ids */
+       __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+       u8      reserved[2];
+       __le16  rule_id;  /* only used on add */
+       __le16  mirror_rules_used;
+       __le16  mirror_rules_free;
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* DCB 0x03xx*/
+
+/* PFC Ignore (direct 0x0301)
+ *    the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+       u8      tc_bitmap;
+       u8      command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET                0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR      0x0
+       u8      reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+       __le16  vsi_seid;
+       u8      reserved[6];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+       __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+       __le16  vsi_seid;
+       u8      reserved[2];
+       __le16  credit;
+       u8      reserved1[2];
+       u8      max_credit; /* 0-3, limit = 2^max */
+       u8      reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
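+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * requesting a VSI rate limit, assuming the 50 Mbps-per-credit granularity
+ * used elsewhere in the driver; max_credit raises the ceiling to 2^max
+ * credits per the field comment above.
+ */
+static inline void i40e_example_vsi_bw_limit(struct i40e_aqc_configure_vsi_bw_limit *cmd,
+                                            u16 seid, u32 rate_mbps)
+{
+       cmd->vsi_seid = cpu_to_le16(seid);
+       cmd->credit = cpu_to_le16(rate_mbps / 50); /* assumed credit size */
+       cmd->max_credit = 0;
+}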
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ *    responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+       u8      tc_valid_bits;
+       u8      reserved[15];
+       __le16  tc_bw_credits[8]; /* FW writes back QS handles here */
+
+       /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+       __le16  tc_bw_max[2];
+       u8      reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ *    responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+       u8      tc_valid_bits;
+       u8      reserved[3];
+       u8      tc_bw_credits[8];
+       u8      reserved1[4];
+       __le16  qs_handles[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+       u8      tc_valid_bits;
+       u8      tc_suspended_bits;
+       u8      reserved[14];
+       __le16  qs_handles[8];
+       u8      reserved1[4];
+       __le16  port_bw_limit;
+       u8      reserved2[2];
+       u8      max_bw; /* 0-3, limit = 2^max */
+       u8      reserved3[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+       u8      tc_valid_bits;
+       u8      reserved[3];
+       u8      share_credits[8];
+       __le16  credits[8];
+
+       /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+       __le16  tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
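+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * extracting one per-TC max from tc_bw_max[], reading the packed layout
+ * described above as a 4-bit field per TC (low 3 bits meaningful), TCs 0-3
+ * in the first word and TCs 4-7 in the second.
+ */
+static inline u8 i40e_example_tc_bw_max(const __le16 tc_bw_max[2], u8 tc)
+{
+       u16 word = le16_to_cpu(tc_bw_max[tc / 4]);
+
+       return (word >> ((tc % 4) * 4)) & 0x7;
+}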
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+       __le16  seid;
+       u8      reserved[2];
+       __le16  credit;
+       u8      reserved1[2];
+       u8      max_bw; /* 0-3, limit = 2^max */
+       u8      reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable  Physical Port ETS (indirect 0x0413)
+ * Modify  Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+       u8      reserved[4];
+       u8      tc_valid_bits;
+       u8      seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK    0x1
+       u8      tc_strict_priority_flags;
+       u8      reserved1[17];
+       u8      tc_bw_share_credits[8];
+       u8      reserved2[96];
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
+/* Configure Switching Component Bandwidth Limits per TC (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+       u8      tc_valid_bits;
+       u8      reserved[15];
+       __le16  tc_bw_credit[8];
+
+       /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+       __le16  tc_bw_max[2];
+       u8      reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
+/* Configure Switching Component Bandwidth Allocation per TC
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+       u8      tc_valid_bits;
+       u8      reserved[2];
+       u8      absolute_credits; /* bool */
+       u8      tc_bw_share_credits[8];
+       u8      reserved1[20];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+       u8      tc_valid_bits;
+       u8      reserved[35];
+       __le16  port_bw_limit;
+       u8      reserved1[2];
+       u8      tc_bw_max; /* 0-3, limit = 2^max */
+       u8      reserved2[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+       u8      reserved[4];
+       u8      tc_valid_bits;
+       u8      reserved1;
+       u8      tc_strict_priority_bits;
+       u8      reserved2;
+       u8      tc_bw_share_credits[8];
+       __le16  tc_bw_limits[8];
+
+       /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+       __le16  tc_bw_max[2];
+       u8      reserved3[32];
+};
+
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+       u8      tc_valid_bits;
+       u8      reserved[2];
+       u8      absolute_credits_enable; /* bool */
+       u8      tc_bw_share_credits[8];
+       __le16  tc_bw_limits[8];
+
+       /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+       __le16  tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+       __le16  pf_valid_bits;
+       u8      min_bw[16];      /* guaranteed bandwidth */
+       u8      max_bw[16];      /* bandwidth limit */
+};
+
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+       u8      pm_profile;
+       u8      pe_vf_enabled;
+       u8      reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+       /* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
+       I40E_HMC_PROFILE_DEFAULT        = 1,
+       I40E_HMC_PROFILE_FAVOR_VF       = 2,
+       I40E_HMC_PROFILE_EQUAL          = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK       0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK    0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES   0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES      0x0002
+
+enum i40e_aq_phy_type {
+       I40E_PHY_TYPE_SGMII                     = 0x0,
+       I40E_PHY_TYPE_1000BASE_KX               = 0x1,
+       I40E_PHY_TYPE_10GBASE_KX4               = 0x2,
+       I40E_PHY_TYPE_10GBASE_KR                = 0x3,
+       I40E_PHY_TYPE_40GBASE_KR4               = 0x4,
+       I40E_PHY_TYPE_XAUI                      = 0x5,
+       I40E_PHY_TYPE_XFI                       = 0x6,
+       I40E_PHY_TYPE_SFI                       = 0x7,
+       I40E_PHY_TYPE_XLAUI                     = 0x8,
+       I40E_PHY_TYPE_XLPPI                     = 0x9,
+       I40E_PHY_TYPE_40GBASE_CR4_CU            = 0xA,
+       I40E_PHY_TYPE_10GBASE_CR1_CU            = 0xB,
+       I40E_PHY_TYPE_10GBASE_AOC               = 0xC,
+       I40E_PHY_TYPE_40GBASE_AOC               = 0xD,
+       I40E_PHY_TYPE_100BASE_TX                = 0x11,
+       I40E_PHY_TYPE_1000BASE_T                = 0x12,
+       I40E_PHY_TYPE_10GBASE_T                 = 0x13,
+       I40E_PHY_TYPE_10GBASE_SR                = 0x14,
+       I40E_PHY_TYPE_10GBASE_LR                = 0x15,
+       I40E_PHY_TYPE_10GBASE_SFPP_CU           = 0x16,
+       I40E_PHY_TYPE_10GBASE_CR1               = 0x17,
+       I40E_PHY_TYPE_40GBASE_CR4               = 0x18,
+       I40E_PHY_TYPE_40GBASE_SR4               = 0x19,
+       I40E_PHY_TYPE_40GBASE_LR4               = 0x1A,
+       I40E_PHY_TYPE_1000BASE_SX               = 0x1B,
+       I40E_PHY_TYPE_1000BASE_LX               = 0x1C,
+       I40E_PHY_TYPE_1000BASE_T_OPTICAL        = 0x1D,
+       I40E_PHY_TYPE_20GBASE_KR2               = 0x1E,
+       I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT    0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT   0x2
+#define I40E_LINK_SPEED_10GB_SHIFT     0x3
+#define I40E_LINK_SPEED_40GB_SHIFT     0x4
+#define I40E_LINK_SPEED_20GB_SHIFT     0x5
+
+enum i40e_aq_link_speed {
+       I40E_LINK_SPEED_UNKNOWN = 0,
+       I40E_LINK_SPEED_100MB   = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+       I40E_LINK_SPEED_1GB     = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+       I40E_LINK_SPEED_10GB    = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+       I40E_LINK_SPEED_40GB    = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+       I40E_LINK_SPEED_20GB    = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
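+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * translating a link_speed bit into Mbps for reporting.
+ */
+static inline u32 i40e_example_speed_mbps(u8 link_speed)
+{
+       switch (link_speed) {
+       case I40E_LINK_SPEED_100MB:     return 100;
+       case I40E_LINK_SPEED_1GB:       return 1000;
+       case I40E_LINK_SPEED_10GB:      return 10000;
+       case I40E_LINK_SPEED_20GB:      return 20000;
+       case I40E_LINK_SPEED_40GB:      return 40000;
+       default:                        return 0;
+       }
+}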
+
+struct i40e_aqc_module_desc {
+       u8 oui[3];
+       u8 reserved1;
+       u8 part_number[16];
+       u8 revision[4];
+       u8 reserved2[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
+struct i40e_aq_get_phy_abilities_resp {
+       __le32  phy_type;       /* bitmap using the above enum for offsets */
+       u8      link_speed;     /* bitmap using the above enum bit patterns */
+       u8      abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX      0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX      0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER     0x04
+#define I40E_AQ_PHY_LINK_ENABLED       0x08
+#define I40E_AQ_PHY_AN_ENABLED         0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL   0x20
+       __le16  eee_capability;
+#define I40E_AQ_EEE_100BASE_TX         0x0002
+#define I40E_AQ_EEE_1000BASE_T         0x0004
+#define I40E_AQ_EEE_10GBASE_T          0x0008
+#define I40E_AQ_EEE_1000BASE_KX                0x0010
+#define I40E_AQ_EEE_10GBASE_KX4                0x0020
+#define I40E_AQ_EEE_10GBASE_KR         0x0040
+       __le32  eeer_val;
+       u8      d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA    0x01
+       u8      reserved[3];
+       u8      phy_id[4];
+       u8      module_type[3];
+       u8      qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS            16
+       struct i40e_aqc_module_desc     qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as in get_phy_abilities_resp */
+       __le32  phy_type;
+       u8      link_speed;
+       u8      abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK                0x08
+#define I40E_AQ_PHY_ENABLE_AN          0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+       __le16  eee_capability;
+       __le32  eeer;
+       u8      low_power_ctrl;
+       u8      reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+       __le16  max_frame_size;
+       u8      params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN          0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK     0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT    3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE     0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX  0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX  0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX  0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX  0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX  0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX  0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX  0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX  0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX  0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX  0x1
+       u8      tx_timer_priority; /* bitmap */
+       __le16  tx_timer_value;
+       __le16  fc_refresh_threshold;
+       u8      reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x0605) */
+struct i40e_aqc_set_link_restart_an {
+       u8      command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE        0x04
+       u8      reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+       __le16  command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK               0x3
+#define I40E_AQ_LSE_NOP                        0x0
+#define I40E_AQ_LSE_DISABLE            0x2
+#define I40E_AQ_LSE_ENABLE             0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED         0x1
+       u8      phy_type;    /* i40e_aq_phy_type   */
+       u8      link_speed;  /* i40e_aq_link_speed */
+       u8      link_info;
+#define I40E_AQ_LINK_UP                        0x01    /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION       0x01
+#define I40E_AQ_LINK_FAULT             0x02
+#define I40E_AQ_LINK_FAULT_TX          0x04
+#define I40E_AQ_LINK_FAULT_RX          0x08
+#define I40E_AQ_LINK_FAULT_REMOTE      0x10
+#define I40E_AQ_LINK_UP_PORT           0x20
+#define I40E_AQ_MEDIA_AVAILABLE                0x40
+#define I40E_AQ_SIGNAL_DETECT          0x80
+       u8      an_info;
+#define I40E_AQ_AN_COMPLETED           0x01
+#define I40E_AQ_LP_AN_ABILITY          0x02
+#define I40E_AQ_PD_FAULT               0x04
+#define I40E_AQ_FEC_EN                 0x08
+#define I40E_AQ_PHY_LOW_POWER          0x10
+#define I40E_AQ_LINK_PAUSE_TX          0x20
+#define I40E_AQ_LINK_PAUSE_RX          0x40
+#define I40E_AQ_QUALIFIED_MODULE       0x80
+       u8      ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM    0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS   0x02
+#define I40E_AQ_LINK_TX_SHIFT          0x02
+#define I40E_AQ_LINK_TX_MASK           (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE         0x00
+#define I40E_AQ_LINK_TX_DRAINED                0x01
+#define I40E_AQ_LINK_TX_FLUSHED                0x03
+#define I40E_AQ_LINK_FORCED_40G                0x10
+       u8      loopback; /* use defines from i40e_aqc_set_lb_mode */
+       __le16  max_frame_size;
+       u8      config;
+#define I40E_AQ_CONFIG_CRC_ENA         0x04
+#define I40E_AQ_CONFIG_PACING_MASK     0x78
+       u8      reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
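+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * reading the essentials from a Get Link Status response: function-level
+ * link up, media present, and no link fault.
+ */
+static inline bool i40e_example_link_is_up(const struct i40e_aqc_get_link_status *resp)
+{
+       return (resp->link_info & I40E_AQ_LINK_UP_FUNCTION) &&
+              (resp->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+              !(resp->link_info & I40E_AQ_LINK_FAULT);
+}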
+
+/* Set event mask command (direct 0x0613) */
+struct i40e_aqc_set_phy_int_mask {
+       u8      reserved[8];
+       __le16  event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN      0x0002
+#define I40E_AQ_EVENT_MEDIA_NA         0x0004
+#define I40E_AQ_EVENT_LINK_FAULT       0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM   0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT    0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED     0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED        0x0200
+       u8      reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+       __le32  local_an_reg0;
+       __le16  local_an_reg1;
+       u8      reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+       __le16  lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL   0x01
+#define I40E_AQ_LB_PHY_REMOTE  0x02
+#define I40E_AQ_LB_MAC_LOCAL   0x04
+       u8      reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+       u8      command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL       0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK  (0x03 << \
+                                       I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE  0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD  0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT  0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW      0x10
+       u8      reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
+
+enum i40e_aq_phy_reg_type {
+       I40E_AQC_PHY_REG_INTERNAL       = 0x1,
+       I40E_AQC_PHY_REG_EXERNAL_BASET  = 0x2,
+       I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+       u8      command_flags;
+#define I40E_AQ_NVM_LAST_CMD   0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+       u8      module_pointer;
+       __le16  length;
+       __le32  offset;
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
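+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * filling the descriptor for an NVM Read (0x0701), with the caller-supplied
+ * DMA bus address split across addr_high/addr_low as usual.
+ */
+static inline void i40e_example_fill_nvm_read(struct i40e_aqc_nvm_update *cmd,
+                                             u8 module, u32 offset, u16 length,
+                                             u64 dma, bool last)
+{
+       cmd->command_flags = last ? I40E_AQ_NVM_LAST_CMD : 0;
+       cmd->module_pointer = module;
+       cmd->length = cpu_to_le16(length);
+       cmd->offset = cpu_to_le32(offset);
+       cmd->addr_high = cpu_to_le32((u32)(dma >> 32));
+       cmd->addr_low = cpu_to_le32((u32)dma);
+}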
+
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+       __le16  cmd_flags;
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK  1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE               0
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES            1
+       __le16  element_count;
+       __le16  element_id;     /* Feature/field ID */
+       __le16  element_id_msw; /* MSWord of field ID */
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+       __le16  cmd_flags;
+       __le16  element_count;
+       u8      reserved[4];
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT                1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK         (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE                           0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD           (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+struct i40e_aqc_nvm_config_data_feature {
+       __le16 feature_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY           0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP          0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR            0x10
+       __le16 feature_options;
+       __le16 feature_selection;
+};
+
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+       __le32 field_id;
+       __le32 field_value;
+       __le16 field_options;
+       __le16 reserved;
+};
+
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA      0x01
+       u8 sel_data;
+       u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+       u8 str_len;
+       u8 dev_addr;
+       __le16 eeprom_addr;
+       u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
+/* Send to PF command (indirect 0x0801); id is used only by the PF
+ * Send to VF command (indirect 0x0802); id is used only by the PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+       __le32  id;
+       u8      reserved[4];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+       __le32 address0;
+       __le32 data0;
+       __le32 address1;
+       __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+       __le32 address;
+       __le32 length;
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+       __le16  cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK       1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY     0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI       1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED         2
+       u8      reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+       __le32  mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE    0
+#define I40E_AQ_ALTERNATE_MODE_OEM     1
+       u8      reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* LAN Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+       __le32  prtdcb_rupto;
+       __le32  otx_ctl;
+       u8      reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+       u8      type;
+       u8      reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK             0x3
+#define I40E_AQ_LLDP_MIB_LOCAL                 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE                        0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE      0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK          0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT         0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE        0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR      0x1
+#define I40E_AQ_LLDP_TX_SHIFT                  0x4
+#define I40E_AQ_LLDP_TX_MASK                   (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+       __le16  local_len;
+       __le16  remote_len;
+       u8      reserved2[2];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+       u8      command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE        0x1
+       u8      reserved[7];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+       u8      type; /* only nearest bridge and non-TPMR from 0x0A00 */
+       u8      reserved1[1];
+       __le16  len;
+       u8      reserved2[4];
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+       u8      type; /* only nearest bridge and non-TPMR from 0x0A00 */
+       u8      reserved;
+       __le16  old_len;
+       __le16  new_offset;
+       __le16  new_len;
+       __le32  addr_high;
+       __le32  addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+       u8      command;
+#define I40E_AQ_LLDP_AGENT_STOP                0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN    0x1
+       u8      reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+       u8      command;
+#define I40E_AQ_LLDP_AGENT_START       0x1
+       u8      reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * and returns the structures below as the indirect response
+ */
+
+#define I40E_AQC_CEE_APP_FCOE_SHIFT    0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK     (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT   0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK    (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT     0x8
+#define I40E_AQC_CEE_APP_FIP_MASK      (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
+#define I40E_AQC_CEE_PG_STATUS_SHIFT   0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK    (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT  0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK   (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT  0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK   (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK  (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT        0xB
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT  0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK   (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
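+
+/* Editor's illustrative sketch (hypothetical helper, not part of the driver):
+ * pulling one status field out of a host-order tlv_status word with the
+ * shift/mask pairs above, here the iSCSI application status.
+ */
+static inline u8 i40e_example_cee_iscsi_status(u32 tlv_status)
+{
+       return (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+              I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+}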
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct.  Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+       u8      reserved1;
+       u8      oper_num_tc;
+       u8      oper_prio_tc[4];
+       u8      reserved2;
+       u8      oper_tc_bw[8];
+       u8      oper_pfc_en;
+       u8      reserved3[2];
+       __le16  oper_app_prio;
+       u8      reserved4[2];
+       __le16  tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+       u8      oper_num_tc;
+       u8      oper_prio_tc[4];
+       u8      oper_tc_bw[8];
+       u8      oper_pfc_en;
+       __le16  oper_app_prio;
+       __le32  tlv_status;
+       u8      reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
+/*     Set Local LLDP MIB (indirect 0x0A08)
+ *     Used to replace the local MIB of a given LLDP agent, e.g. DCBX
+ */
+struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT       0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK        (1 << \
+                                       SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB        0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT   (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK    (1 << \
+                               SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS         0x1
+       u8      type;
+       u8      reserved0;
+       __le16  length;
+       u8      reserved1[4];
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
+
+/*     Stop/Start LLDP Agent (direct 0x0A09)
+ *     Used for stopping/starting a specific LLDP agent, e.g. DCBX
+ */
+struct i40e_aqc_lldp_stop_start_specific_agent {
+#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT    0
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK     (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+       u8      command;
+       u8      reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+
+/* Add UDP Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+       __le16  udp_port;
+       u8      reserved0[3];
+       u8      protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN     0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE       0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO    0x10
+       u8      reserved1[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+struct i40e_aqc_add_udp_tunnel_completion {
+       __le16 udp_port;
+       u8      filter_entry_index;
+       u8      multiple_pfs;
+#define I40E_AQC_SINGLE_PF             0x0
+#define I40E_AQC_MULTIPLE_PFS          0x1
+       u8      total_filters;
+       u8      reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* Remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+       u8      reserved[2];
+       u8      index; /* 0 to 15 */
+       u8      reserved2[13];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+       __le16  udp_port;
+       u8      index; /* 0 to 15 */
+       u8      multiple_pfs;
+       u8      total_filters_used;
+       u8      reserved1[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure {
+       u8      key1_off;
+       u8      key2_off;
+       u8      key1_len;  /* 0 to 15 */
+       u8      key2_len;  /* 0 to 15 */
+       u8      flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE    0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS     0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED    0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN  0x03
+       u8      network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN               0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE                 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP     0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE                 0x3
+       u8      reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+       __le32  param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL  0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL  1
+#define I40E_AQ_OEM_PARAM_MAC          2
+       __le32  param_value1;
+       __le16  param_value2;
+       u8      reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+       __le32  state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN    0x0
+#define I40E_AQ_OEM_STATE_LINK_UP      0x1
+       u8      reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+       u8 type_status;
+       u8 reserved1[3];
+       __le32 ocsd_memory_block_addr_high;
+       __le32 ocsd_memory_block_addr_low;
+       __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB  (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+       u8 type_status;
+       u8 reserved1[3];
+       __le32 ocbb_memory_block_addr_high;
+       __le32 ocbb_memory_block_addr_low;
+       u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+       u8      mode;
+#define I40E_AQ_TEST_PARTIAL   0
+#define I40E_AQ_TEST_FULL      1
+#define I40E_AQ_TEST_NVM       2
+       u8      reserved[3];
+       u8      command;
+#define I40E_AQ_TEST_OPEN      0
+#define I40E_AQ_TEST_CLOSE     1
+#define I40E_AQ_TEST_INC       2
+       u8      reserved2[3];
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+       __le32 reserved;
+       __le32 address;
+       __le32 value_high;
+       __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read  (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+       __le32 address;
+       __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+       __le32 address;
+       __le32 value;
+       __le32 clear_mask;
+       __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX         0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU  1
+#define I40E_AQ_CLUSTER_ID_TXSCHED     2
+#define I40E_AQ_CLUSTER_ID_HMC         3
+#define I40E_AQ_CLUSTER_ID_MAC0                4
+#define I40E_AQ_CLUSTER_ID_MAC1                5
+#define I40E_AQ_CLUSTER_ID_MAC2                6
+#define I40E_AQ_CLUSTER_ID_MAC3                7
+#define I40E_AQ_CLUSTER_ID_DCB         8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM     9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF     10
+#define I40E_AQ_CLUSTER_ID_ALTRAM      11
+
+struct i40e_aqc_debug_dump_internals {
+       u8      cluster_id;
+       u8      table_id;
+       __le16  data_size;
+       __le32  idx;
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+       u8      cluster_id;
+       u8      cluster_specific_params[7];
+       __le32  address_high;
+       __le32  address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_alloc.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_alloc.h
new file mode 100644 (file)
index 0000000..2e7e3cf
--- /dev/null
@@ -0,0 +1,58 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+       i40e_mem_arq_buf = 0,           /* ARQ indirect command buffer */
+       i40e_mem_asq_buf = 1,
+       i40e_mem_atq_buf = 2,           /* ATQ indirect command buffer */
+       i40e_mem_arq_ring = 3,          /* ARQ descriptor ring */
+       i40e_mem_atq_ring = 4,          /* ATQ descriptor ring */
+       i40e_mem_pd = 5,                /* Page Descriptor */
+       i40e_mem_bp = 6,                /* Backing Page - 4KB */
+       i40e_mem_bp_jumbo = 7,          /* Backing Page - > 4KB */
+       i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+                                           struct i40e_dma_mem *mem,
+                                           enum i40e_memory_type type,
+                                           u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+                                       struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+                                            struct i40e_virt_mem *mem,
+                                            u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+                                        struct i40e_virt_mem *mem);
+
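+/* Illustrative pairing (sketch): callers go through these wrappers rather
+ * than the OS allocator directly, e.g. for an adminq descriptor ring:
+ *
+ *     struct i40e_dma_mem mem;
+ *
+ *     status = i40e_allocate_dma_mem(hw, &mem, i40e_mem_arq_ring,
+ *                                    ring_bytes, alignment);
+ *     if (!status)
+ *             ... use mem.va (virtual) and mem.pa (bus address) ...
+ *     i40e_free_dma_mem(hw, &mem);
+ */
+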
+#endif /* _I40E_ALLOC_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_common.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_common.c
new file mode 100644 (file)
index 0000000..2aded6b
--- /dev/null
@@ -0,0 +1,5474 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+       i40e_status status = I40E_SUCCESS;
+
+       if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
+               switch (hw->device_id) {
+               case I40E_DEV_ID_SFP_XL710:
+               case I40E_DEV_ID_QEMU:
+               case I40E_DEV_ID_KX_A:
+               case I40E_DEV_ID_KX_B:
+               case I40E_DEV_ID_KX_C:
+               case I40E_DEV_ID_QSFP_A:
+               case I40E_DEV_ID_QSFP_B:
+               case I40E_DEV_ID_QSFP_C:
+               case I40E_DEV_ID_10G_BASE_T:
+               case I40E_DEV_ID_10G_BASE_T4:
+               case I40E_DEV_ID_20G_KR2:
+               case I40E_DEV_ID_20G_KR2_A:
+                       hw->mac.type = I40E_MAC_XL710;
+                       break;
+               case I40E_DEV_ID_VF:
+               case I40E_DEV_ID_VF_HV:
+                       hw->mac.type = I40E_MAC_VF;
+                       break;
+               default:
+                       hw->mac.type = I40E_MAC_GENERIC;
+                       break;
+               }
+       } else {
+               status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+       }
+
+       hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+                 hw->mac.type, status);
+       return status;
+}
+
+/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+       switch (aq_err) {
+       case I40E_AQ_RC_OK:
+               return "OK";
+       case I40E_AQ_RC_EPERM:
+               return "I40E_AQ_RC_EPERM";
+       case I40E_AQ_RC_ENOENT:
+               return "I40E_AQ_RC_ENOENT";
+       case I40E_AQ_RC_ESRCH:
+               return "I40E_AQ_RC_ESRCH";
+       case I40E_AQ_RC_EINTR:
+               return "I40E_AQ_RC_EINTR";
+       case I40E_AQ_RC_EIO:
+               return "I40E_AQ_RC_EIO";
+       case I40E_AQ_RC_ENXIO:
+               return "I40E_AQ_RC_ENXIO";
+       case I40E_AQ_RC_E2BIG:
+               return "I40E_AQ_RC_E2BIG";
+       case I40E_AQ_RC_EAGAIN:
+               return "I40E_AQ_RC_EAGAIN";
+       case I40E_AQ_RC_ENOMEM:
+               return "I40E_AQ_RC_ENOMEM";
+       case I40E_AQ_RC_EACCES:
+               return "I40E_AQ_RC_EACCES";
+       case I40E_AQ_RC_EFAULT:
+               return "I40E_AQ_RC_EFAULT";
+       case I40E_AQ_RC_EBUSY:
+               return "I40E_AQ_RC_EBUSY";
+       case I40E_AQ_RC_EEXIST:
+               return "I40E_AQ_RC_EEXIST";
+       case I40E_AQ_RC_EINVAL:
+               return "I40E_AQ_RC_EINVAL";
+       case I40E_AQ_RC_ENOTTY:
+               return "I40E_AQ_RC_ENOTTY";
+       case I40E_AQ_RC_ENOSPC:
+               return "I40E_AQ_RC_ENOSPC";
+       case I40E_AQ_RC_ENOSYS:
+               return "I40E_AQ_RC_ENOSYS";
+       case I40E_AQ_RC_ERANGE:
+               return "I40E_AQ_RC_ERANGE";
+       case I40E_AQ_RC_EFLUSHED:
+               return "I40E_AQ_RC_EFLUSHED";
+       case I40E_AQ_RC_BAD_ADDR:
+               return "I40E_AQ_RC_BAD_ADDR";
+       case I40E_AQ_RC_EMODE:
+               return "I40E_AQ_RC_EMODE";
+       case I40E_AQ_RC_EFBIG:
+               return "I40E_AQ_RC_EFBIG";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+       return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+       switch (stat_err) {
+       case I40E_SUCCESS:
+               return "OK";
+       case I40E_ERR_NVM:
+               return "I40E_ERR_NVM";
+       case I40E_ERR_NVM_CHECKSUM:
+               return "I40E_ERR_NVM_CHECKSUM";
+       case I40E_ERR_PHY:
+               return "I40E_ERR_PHY";
+       case I40E_ERR_CONFIG:
+               return "I40E_ERR_CONFIG";
+       case I40E_ERR_PARAM:
+               return "I40E_ERR_PARAM";
+       case I40E_ERR_MAC_TYPE:
+               return "I40E_ERR_MAC_TYPE";
+       case I40E_ERR_UNKNOWN_PHY:
+               return "I40E_ERR_UNKNOWN_PHY";
+       case I40E_ERR_LINK_SETUP:
+               return "I40E_ERR_LINK_SETUP";
+       case I40E_ERR_ADAPTER_STOPPED:
+               return "I40E_ERR_ADAPTER_STOPPED";
+       case I40E_ERR_INVALID_MAC_ADDR:
+               return "I40E_ERR_INVALID_MAC_ADDR";
+       case I40E_ERR_DEVICE_NOT_SUPPORTED:
+               return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+       case I40E_ERR_MASTER_REQUESTS_PENDING:
+               return "I40E_ERR_MASTER_REQUESTS_PENDING";
+       case I40E_ERR_INVALID_LINK_SETTINGS:
+               return "I40E_ERR_INVALID_LINK_SETTINGS";
+       case I40E_ERR_AUTONEG_NOT_COMPLETE:
+               return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+       case I40E_ERR_RESET_FAILED:
+               return "I40E_ERR_RESET_FAILED";
+       case I40E_ERR_SWFW_SYNC:
+               return "I40E_ERR_SWFW_SYNC";
+       case I40E_ERR_NO_AVAILABLE_VSI:
+               return "I40E_ERR_NO_AVAILABLE_VSI";
+       case I40E_ERR_NO_MEMORY:
+               return "I40E_ERR_NO_MEMORY";
+       case I40E_ERR_BAD_PTR:
+               return "I40E_ERR_BAD_PTR";
+       case I40E_ERR_RING_FULL:
+               return "I40E_ERR_RING_FULL";
+       case I40E_ERR_INVALID_PD_ID:
+               return "I40E_ERR_INVALID_PD_ID";
+       case I40E_ERR_INVALID_QP_ID:
+               return "I40E_ERR_INVALID_QP_ID";
+       case I40E_ERR_INVALID_CQ_ID:
+               return "I40E_ERR_INVALID_CQ_ID";
+       case I40E_ERR_INVALID_CEQ_ID:
+               return "I40E_ERR_INVALID_CEQ_ID";
+       case I40E_ERR_INVALID_AEQ_ID:
+               return "I40E_ERR_INVALID_AEQ_ID";
+       case I40E_ERR_INVALID_SIZE:
+               return "I40E_ERR_INVALID_SIZE";
+       case I40E_ERR_INVALID_ARP_INDEX:
+               return "I40E_ERR_INVALID_ARP_INDEX";
+       case I40E_ERR_INVALID_FPM_FUNC_ID:
+               return "I40E_ERR_INVALID_FPM_FUNC_ID";
+       case I40E_ERR_QP_INVALID_MSG_SIZE:
+               return "I40E_ERR_QP_INVALID_MSG_SIZE";
+       case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+               return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+       case I40E_ERR_INVALID_FRAG_COUNT:
+               return "I40E_ERR_INVALID_FRAG_COUNT";
+       case I40E_ERR_QUEUE_EMPTY:
+               return "I40E_ERR_QUEUE_EMPTY";
+       case I40E_ERR_INVALID_ALIGNMENT:
+               return "I40E_ERR_INVALID_ALIGNMENT";
+       case I40E_ERR_FLUSHED_QUEUE:
+               return "I40E_ERR_FLUSHED_QUEUE";
+       case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+               return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+       case I40E_ERR_INVALID_IMM_DATA_SIZE:
+               return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+       case I40E_ERR_TIMEOUT:
+               return "I40E_ERR_TIMEOUT";
+       case I40E_ERR_OPCODE_MISMATCH:
+               return "I40E_ERR_OPCODE_MISMATCH";
+       case I40E_ERR_CQP_COMPL_ERROR:
+               return "I40E_ERR_CQP_COMPL_ERROR";
+       case I40E_ERR_INVALID_VF_ID:
+               return "I40E_ERR_INVALID_VF_ID";
+       case I40E_ERR_INVALID_HMCFN_ID:
+               return "I40E_ERR_INVALID_HMCFN_ID";
+       case I40E_ERR_BACKING_PAGE_ERROR:
+               return "I40E_ERR_BACKING_PAGE_ERROR";
+       case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+               return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+       case I40E_ERR_INVALID_PBLE_INDEX:
+               return "I40E_ERR_INVALID_PBLE_INDEX";
+       case I40E_ERR_INVALID_SD_INDEX:
+               return "I40E_ERR_INVALID_SD_INDEX";
+       case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+               return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+       case I40E_ERR_INVALID_SD_TYPE:
+               return "I40E_ERR_INVALID_SD_TYPE";
+       case I40E_ERR_MEMCPY_FAILED:
+               return "I40E_ERR_MEMCPY_FAILED";
+       case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+               return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+       case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+               return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+       case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+               return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+       case I40E_ERR_SRQ_ENABLED:
+               return "I40E_ERR_SRQ_ENABLED";
+       case I40E_ERR_ADMIN_QUEUE_ERROR:
+               return "I40E_ERR_ADMIN_QUEUE_ERROR";
+       case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+               return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+       case I40E_ERR_BUF_TOO_SHORT:
+               return "I40E_ERR_BUF_TOO_SHORT";
+       case I40E_ERR_ADMIN_QUEUE_FULL:
+               return "I40E_ERR_ADMIN_QUEUE_FULL";
+       case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+               return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+       case I40E_ERR_BAD_IWARP_CQE:
+               return "I40E_ERR_BAD_IWARP_CQE";
+       case I40E_ERR_NVM_BLANK_MODE:
+               return "I40E_ERR_NVM_BLANK_MODE";
+       case I40E_ERR_NOT_IMPLEMENTED:
+               return "I40E_ERR_NOT_IMPLEMENTED";
+       case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+               return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+       case I40E_ERR_DIAG_TEST_FAILED:
+               return "I40E_ERR_DIAG_TEST_FAILED";
+       case I40E_ERR_NOT_READY:
+               return "I40E_ERR_NOT_READY";
+       case I40E_NOT_SUPPORTED:
+               return "I40E_NOT_SUPPORTED";
+       case I40E_ERR_FIRMWARE_API_VERSION:
+               return "I40E_ERR_FIRMWARE_API_VERSION";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+       return hw->err_str;
+}
+
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps a debug log of an adminq command, including the descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+                  void *buffer, u16 buf_len)
+{
+       struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+       u16 len = LE16_TO_CPU(aq_desc->datalen);
+       u8 *buf = (u8 *)buffer;
+       u16 i = 0;
+
+       if ((!(mask & hw->debug_mask)) || (desc == NULL))
+               return;
+
+       i40e_debug(hw, mask,
+                  "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+                  LE16_TO_CPU(aq_desc->opcode),
+                  LE16_TO_CPU(aq_desc->flags),
+                  LE16_TO_CPU(aq_desc->datalen),
+                  LE16_TO_CPU(aq_desc->retval));
+       i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+                  LE32_TO_CPU(aq_desc->cookie_high),
+                  LE32_TO_CPU(aq_desc->cookie_low));
+       i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
+                  LE32_TO_CPU(aq_desc->params.internal.param0),
+                  LE32_TO_CPU(aq_desc->params.internal.param1));
+       i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
+                  LE32_TO_CPU(aq_desc->params.external.addr_high),
+                  LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+       if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+               i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+               if (buf_len < len)
+                       len = buf_len;
+               /* write the full 16-byte chunks */
+               for (i = 0; i < (len - 16); i += 16)
+                       i40e_debug(hw, mask,
+                                  "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+                                  i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+                                  buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+                                  buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+                                  buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+               /* write whatever's left over without overrunning the buffer */
+               if (i < len) {
+                       char d_buf[80];
+                       int j = 0;
+
+                       memset(d_buf, 0, sizeof(d_buf));
+                       j += sprintf(d_buf, "\t0x%04X ", i);
+                       while (i < len)
+                               j += sprintf(&d_buf[j], " %02X", buf[i++]);
+                       i40e_debug(hw, mask, "%s\n", d_buf);
+               }
+       }
+}
+
+/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the queue is enabled, else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+       if (hw->aq.asq.len)
+               return !!(rd32(hw, hw->aq.asq.len) &
+                       I40E_PF_ATQLEN_ATQENABLE_MASK);
+       return false;
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+                                            bool unloading)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_queue_shutdown *cmd =
+               (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_queue_shutdown);
+
+       if (unloading)
+               cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+       return status;
+}
+
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ *      Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ *      Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ *      Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
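+
+/* In C terms the flow above is roughly (sketch; field names as in
+ * struct i40e_rx_ptype_decoded from i40e_type.h):
+ *
+ *     struct i40e_rx_ptype_decoded d = i40e_ptype_lookup[ptype];
+ *
+ *     if (!d.known)
+ *             return;                                 unknown packet
+ *     if (d.outer_ip == I40E_RX_PTYPE_OUTER_IP)
+ *             look at d.tunnel_type, d.inner_prot, etc.
+ *     else
+ *             interpret the ptype via enum i40e_rx_l2_ptype
+ */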
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+       {       PTYPE, \
+               1, \
+               I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+               I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+               I40E_RX_PTYPE_##OUTER_FRAG, \
+               I40E_RX_PTYPE_TUNNEL_##T, \
+               I40E_RX_PTYPE_TUNNEL_END_##TE, \
+               I40E_RX_PTYPE_##TEF, \
+               I40E_RX_PTYPE_INNER_PROT_##I, \
+               I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+               { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF              I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG              I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS    I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+       /* L2 Packet types */
+       I40E_PTT_UNUSED_ENTRY(0),
+       I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
+       I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT_UNUSED_ENTRY(4),
+       I40E_PTT_UNUSED_ENTRY(5),
+       I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT_UNUSED_ENTRY(8),
+       I40E_PTT_UNUSED_ENTRY(9),
+       I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+       I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+       /* Non Tunneled IPv4 */
+       I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(25),
+       I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
+       I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+       I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+       /* IPv4 --> IPv4 */
+       I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(32),
+       I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 --> IPv6 */
+       I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(39),
+       I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT */
+       I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+       /* IPv4 --> GRE/NAT --> IPv4 */
+       I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(47),
+       I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT --> IPv6 */
+       I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(54),
+       I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT --> MAC */
+       I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+       /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+       I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(62),
+       I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+       I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(69),
+       I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT --> MAC/VLAN */
+       I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+       /* IPv4 --> GRE/NAT --> MAC/VLAN --> IPv4 */
+       I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(77),
+       I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+       I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(84),
+       I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+       /* Non Tunneled IPv6 */
+       I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
+       I40E_PTT_UNUSED_ENTRY(91),
+       I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
+       I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+       I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+       /* IPv6 --> IPv4 */
+       I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(98),
+       I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> IPv6 */
+       I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(105),
+       I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT */
+       I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+       /* IPv6 --> GRE/NAT -> IPv4 */
+       I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(113),
+       I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> IPv6 */
+       I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(120),
+       I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC */
+       I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+       /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+       I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(128),
+       I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+       I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(135),
+       I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC/VLAN */
+       I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+       /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+       I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(143),
+       I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+       I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(150),
+       I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+       /* unused entries */
+       I40E_PTT_UNUSED_ENTRY(154),
+       I40E_PTT_UNUSED_ENTRY(155),
+       I40E_PTT_UNUSED_ENTRY(156),
+       I40E_PTT_UNUSED_ENTRY(157),
+       I40E_PTT_UNUSED_ENTRY(158),
+       I40E_PTT_UNUSED_ENTRY(159),
+
+       I40E_PTT_UNUSED_ENTRY(160),
+       I40E_PTT_UNUSED_ENTRY(161),
+       I40E_PTT_UNUSED_ENTRY(162),
+       I40E_PTT_UNUSED_ENTRY(163),
+       I40E_PTT_UNUSED_ENTRY(164),
+       I40E_PTT_UNUSED_ENTRY(165),
+       I40E_PTT_UNUSED_ENTRY(166),
+       I40E_PTT_UNUSED_ENTRY(167),
+       I40E_PTT_UNUSED_ENTRY(168),
+       I40E_PTT_UNUSED_ENTRY(169),
+
+       I40E_PTT_UNUSED_ENTRY(170),
+       I40E_PTT_UNUSED_ENTRY(171),
+       I40E_PTT_UNUSED_ENTRY(172),
+       I40E_PTT_UNUSED_ENTRY(173),
+       I40E_PTT_UNUSED_ENTRY(174),
+       I40E_PTT_UNUSED_ENTRY(175),
+       I40E_PTT_UNUSED_ENTRY(176),
+       I40E_PTT_UNUSED_ENTRY(177),
+       I40E_PTT_UNUSED_ENTRY(178),
+       I40E_PTT_UNUSED_ENTRY(179),
+
+       I40E_PTT_UNUSED_ENTRY(180),
+       I40E_PTT_UNUSED_ENTRY(181),
+       I40E_PTT_UNUSED_ENTRY(182),
+       I40E_PTT_UNUSED_ENTRY(183),
+       I40E_PTT_UNUSED_ENTRY(184),
+       I40E_PTT_UNUSED_ENTRY(185),
+       I40E_PTT_UNUSED_ENTRY(186),
+       I40E_PTT_UNUSED_ENTRY(187),
+       I40E_PTT_UNUSED_ENTRY(188),
+       I40E_PTT_UNUSED_ENTRY(189),
+
+       I40E_PTT_UNUSED_ENTRY(190),
+       I40E_PTT_UNUSED_ENTRY(191),
+       I40E_PTT_UNUSED_ENTRY(192),
+       I40E_PTT_UNUSED_ENTRY(193),
+       I40E_PTT_UNUSED_ENTRY(194),
+       I40E_PTT_UNUSED_ENTRY(195),
+       I40E_PTT_UNUSED_ENTRY(196),
+       I40E_PTT_UNUSED_ENTRY(197),
+       I40E_PTT_UNUSED_ENTRY(198),
+       I40E_PTT_UNUSED_ENTRY(199),
+
+       I40E_PTT_UNUSED_ENTRY(200),
+       I40E_PTT_UNUSED_ENTRY(201),
+       I40E_PTT_UNUSED_ENTRY(202),
+       I40E_PTT_UNUSED_ENTRY(203),
+       I40E_PTT_UNUSED_ENTRY(204),
+       I40E_PTT_UNUSED_ENTRY(205),
+       I40E_PTT_UNUSED_ENTRY(206),
+       I40E_PTT_UNUSED_ENTRY(207),
+       I40E_PTT_UNUSED_ENTRY(208),
+       I40E_PTT_UNUSED_ENTRY(209),
+
+       I40E_PTT_UNUSED_ENTRY(210),
+       I40E_PTT_UNUSED_ENTRY(211),
+       I40E_PTT_UNUSED_ENTRY(212),
+       I40E_PTT_UNUSED_ENTRY(213),
+       I40E_PTT_UNUSED_ENTRY(214),
+       I40E_PTT_UNUSED_ENTRY(215),
+       I40E_PTT_UNUSED_ENTRY(216),
+       I40E_PTT_UNUSED_ENTRY(217),
+       I40E_PTT_UNUSED_ENTRY(218),
+       I40E_PTT_UNUSED_ENTRY(219),
+
+       I40E_PTT_UNUSED_ENTRY(220),
+       I40E_PTT_UNUSED_ENTRY(221),
+       I40E_PTT_UNUSED_ENTRY(222),
+       I40E_PTT_UNUSED_ENTRY(223),
+       I40E_PTT_UNUSED_ENTRY(224),
+       I40E_PTT_UNUSED_ENTRY(225),
+       I40E_PTT_UNUSED_ENTRY(226),
+       I40E_PTT_UNUSED_ENTRY(227),
+       I40E_PTT_UNUSED_ENTRY(228),
+       I40E_PTT_UNUSED_ENTRY(229),
+
+       I40E_PTT_UNUSED_ENTRY(230),
+       I40E_PTT_UNUSED_ENTRY(231),
+       I40E_PTT_UNUSED_ENTRY(232),
+       I40E_PTT_UNUSED_ENTRY(233),
+       I40E_PTT_UNUSED_ENTRY(234),
+       I40E_PTT_UNUSED_ENTRY(235),
+       I40E_PTT_UNUSED_ENTRY(236),
+       I40E_PTT_UNUSED_ENTRY(237),
+       I40E_PTT_UNUSED_ENTRY(238),
+       I40E_PTT_UNUSED_ENTRY(239),
+
+       I40E_PTT_UNUSED_ENTRY(240),
+       I40E_PTT_UNUSED_ENTRY(241),
+       I40E_PTT_UNUSED_ENTRY(242),
+       I40E_PTT_UNUSED_ENTRY(243),
+       I40E_PTT_UNUSED_ENTRY(244),
+       I40E_PTT_UNUSED_ENTRY(245),
+       I40E_PTT_UNUSED_ENTRY(246),
+       I40E_PTT_UNUSED_ENTRY(247),
+       I40E_PTT_UNUSED_ENTRY(248),
+       I40E_PTT_UNUSED_ENTRY(249),
+
+       I40E_PTT_UNUSED_ENTRY(250),
+       I40E_PTT_UNUSED_ENTRY(251),
+       I40E_PTT_UNUSED_ENTRY(252),
+       I40E_PTT_UNUSED_ENTRY(253),
+       I40E_PTT_UNUSED_ENTRY(254),
+       I40E_PTT_UNUSED_ENTRY(255)
+};
+
+/**
+ * i40e_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+{
+       i40e_status status = I40E_SUCCESS;
+
+       /* Broadcast addresses ARE multicast addresses
+        * Make sure it is not a multicast address
+        * Reject the zero address
+        */
+       if (I40E_IS_MULTICAST(mac_addr) ||
+           (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+             mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+               status = I40E_ERR_INVALID_MAC_ADDR;
+
+       return status;
+}
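+
+/* Note on the check above: I40E_IS_MULTICAST() tests the I/G bit (bit 0
+ * of the first octet), so the all-ones broadcast address is rejected as
+ * a multicast address; e.g. 02:00:00:00:00:01 is accepted while
+ * 01:00:5e:00:00:01 is not.
+ */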
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function.  The following fields in
+ * the hw structure must be filled in beforehand:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+{
+       i40e_status status = I40E_SUCCESS;
+       u32 port, ari, func_rid;
+
+       i40e_set_mac_type(hw);
+
+       switch (hw->mac.type) {
+       case I40E_MAC_XL710:
+               break;
+       default:
+               return I40E_ERR_DEVICE_NOT_SUPPORTED;
+       }
+
+       hw->phy.get_link_info = true;
+
+       /* Determine port number and PF number */
+       port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
+                                          >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+       hw->port = (u8)port;
+       ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
+                                                I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+       func_rid = rd32(hw, I40E_PF_FUNC_RID);
+       if (ari)
+               hw->pf_id = (u8)(func_rid & 0xff);
+       else
+               hw->pf_id = (u8)(func_rid & 0x7);
+
+       status = i40e_init_nvm(hw);
+       return status;
+}
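+
+/* Typical call sequence (sketch): the probe path fills in the PCI identity
+ * before handing the zeroed struct to the shared code, roughly:
+ *
+ *     hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ...);
+ *     hw->vendor_id = pdev->vendor;
+ *     hw->device_id = pdev->device;
+ *     hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ *     hw->subsystem_device_id = pdev->subsystem_device;
+ *     hw->revision_id = pdev->revision;
+ *     status = i40e_init_shared_code(hw);
+ */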
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
+                                  u16 *flags,
+                                  struct i40e_aqc_mac_address_read_data *addrs,
+                                  struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_mac_address_read *cmd_data =
+               (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+       desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+
+       status = i40e_asq_send_command(hw, &desc, addrs,
+                                      sizeof(*addrs), cmd_details);
+       *flags = LE16_TO_CPU(cmd_data->command_flags);
+
+       return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+                                   u16 flags, u8 *mac_addr,
+                                   struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_mac_address_write *cmd_data =
+               (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_mac_address_write);
+       cmd_data->command_flags = CPU_TO_LE16(flags);
+       cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]);
+       cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) |
+                                       ((u32)mac_addr[3] << 16) |
+                                       ((u32)mac_addr[4] << 8) |
+                                       mac_addr[5]);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
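+
+/* Worked example: for mac_addr 00:11:22:33:44:55 the packing above gives
+ * mac_sah = 0x0011 and mac_sal = 0x22334455, each then laid out
+ * little-endian on the wire by CPU_TO_LE16/CPU_TO_LE32.
+ */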
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+       struct i40e_aqc_mac_address_read_data addrs;
+       i40e_status status;
+       u16 flags = 0;
+
+       status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+       if (flags & I40E_AQC_LAN_ADDR_VALID)
+               memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+
+       return status;
+}
+
+/**
+ * i40e_get_port_mac_addr - get Port MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to Port MAC address
+ *
+ * Reads the adapter's Port MAC address
+ **/
+i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+       struct i40e_aqc_mac_address_read_data addrs;
+       i40e_status status;
+       u16 flags = 0;
+
+       status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+       if (status)
+               return status;
+
+       if (flags & I40E_AQC_PORT_ADDR_VALID)
+               memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+       else
+               status = I40E_ERR_INVALID_MAC_ADDR;
+
+       return status;
+}
+
+/**
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles hw requirement to indicate intention to enable
+ * or disable target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+       u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+       u32 reg_block = 0;
+       u32 reg_val;
+
+       if (abs_queue_idx >= 128) {
+               reg_block = abs_queue_idx / 128;
+               abs_queue_idx %= 128;
+       }
+
+       reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+       reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+       reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+       if (enable)
+               reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+       else
+               reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+       wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
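+
+/* Worked example: with base_queue = 260 and queue = 45, abs_queue_idx is
+ * 305, so reg_block = 305 / 128 = 2 and the QINDX field written into
+ * I40E_GLLAN_TXPRE_QDIS(2) is 305 % 128 = 49.
+ */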
+#ifdef I40E_FCOE
+
+/**
+ * i40e_get_san_mac_addr - get SAN MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to SAN MAC address
+ *
+ * Reads the adapter's SAN MAC address from NVM
+ **/
+i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw,
+                                           u8 *mac_addr)
+{
+       struct i40e_aqc_mac_address_read_data addrs;
+       i40e_status status;
+       u16 flags = 0;
+
+       status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+       if (status)
+               return status;
+
+       if (flags & I40E_AQC_SAN_ADDR_VALID)
+               memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+       else
+               status = I40E_ERR_INVALID_MAC_ADDR;
+
+       return status;
+}
+#endif
+
+/**
+ *  i40e_read_pba_string - Reads part number string from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
+ *
+ *  Reads the part number string from the EEPROM.
+ **/
+i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+                                           u32 pba_num_size)
+{
+       i40e_status status = I40E_SUCCESS;
+       u16 pba_word = 0;
+       u16 pba_size = 0;
+       u16 pba_ptr = 0;
+       u16 i = 0;
+
+       status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+       if ((status != I40E_SUCCESS) || (pba_word != 0xFAFA)) {
+               hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
+               return status;
+       }
+
+       status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+       if (status != I40E_SUCCESS) {
+               hw_dbg(hw, "Failed to read PBA Block pointer.\n");
+               return status;
+       }
+
+       status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+       if (status != I40E_SUCCESS) {
+               hw_dbg(hw, "Failed to read PBA Block size.\n");
+               return status;
+       }
+
+       /* Subtract one to get PBA word count (PBA Size word is included in
+        * total size)
+        */
+       pba_size--;
+       if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+               hw_dbg(hw, "Buffer to small for PBA data.\n");
+               return I40E_ERR_PARAM;
+       }
+
+       for (i = 0; i < pba_size; i++) {
+               status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+               if (status != I40E_SUCCESS) {
+                       hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
+                       return status;
+               }
+
+               pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+               pba_num[(i * 2) + 1] = pba_word & 0xFF;
+       }
+       pba_num[(pba_size * 2)] = '\0';
+
+       return status;
+}
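+
+/* Worked example: a PBA block whose size word reads 6 leaves pba_size = 5
+ * data words after the decrement above, i.e. 10 characters, so
+ * pba_num_size must be at least 5 * 2 + 1 = 11 bytes including the NUL.
+ */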
+
+/**
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
+ **/
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+{
+       enum i40e_media_type media;
+
+       switch (hw->phy.link_info.phy_type) {
+       case I40E_PHY_TYPE_10GBASE_SR:
+       case I40E_PHY_TYPE_10GBASE_LR:
+       case I40E_PHY_TYPE_1000BASE_SX:
+       case I40E_PHY_TYPE_1000BASE_LX:
+       case I40E_PHY_TYPE_40GBASE_SR4:
+       case I40E_PHY_TYPE_40GBASE_LR4:
+               media = I40E_MEDIA_TYPE_FIBER;
+               break;
+       case I40E_PHY_TYPE_100BASE_TX:
+       case I40E_PHY_TYPE_1000BASE_T:
+       case I40E_PHY_TYPE_10GBASE_T:
+               media = I40E_MEDIA_TYPE_BASET;
+               break;
+       case I40E_PHY_TYPE_10GBASE_CR1_CU:
+       case I40E_PHY_TYPE_40GBASE_CR4_CU:
+       case I40E_PHY_TYPE_10GBASE_CR1:
+       case I40E_PHY_TYPE_40GBASE_CR4:
+       case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+       case I40E_PHY_TYPE_40GBASE_AOC:
+       case I40E_PHY_TYPE_10GBASE_AOC:
+               media = I40E_MEDIA_TYPE_DA;
+               break;
+       case I40E_PHY_TYPE_1000BASE_KX:
+       case I40E_PHY_TYPE_10GBASE_KX4:
+       case I40E_PHY_TYPE_10GBASE_KR:
+       case I40E_PHY_TYPE_40GBASE_KR4:
+       case I40E_PHY_TYPE_20GBASE_KR2:
+               media = I40E_MEDIA_TYPE_BACKPLANE;
+               break;
+       case I40E_PHY_TYPE_SGMII:
+       case I40E_PHY_TYPE_XAUI:
+       case I40E_PHY_TYPE_XFI:
+       case I40E_PHY_TYPE_XLAUI:
+       case I40E_PHY_TYPE_XLPPI:
+       default:
+               media = I40E_MEDIA_TYPE_UNKNOWN;
+               break;
+       }
+
+       return media;
+}
+
+#define I40E_PF_RESET_WAIT_COUNT       200
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * ensure the global reset is complete and then reset the PF
+ **/
+i40e_status i40e_pf_reset(struct i40e_hw *hw)
+{
+       u32 cnt = 0;
+       u32 cnt1 = 0;
+       u32 reg = 0;
+       u32 grst_del;
+
+       /* Poll for Global Reset steady state in case of recent GRST.
+        * The grst delay value is in 100ms units, and we'll wait ten
+        * counts longer to be sure we don't just miss the end.
+        */
+       grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+                       I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+                       I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+       for (cnt = 0; cnt < grst_del + 10; cnt++) {
+               reg = rd32(hw, I40E_GLGEN_RSTAT);
+               if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+                       break;
+               msleep(100);
+       }
+       if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+               hw_dbg(hw, "Global reset polling failed to complete.\n");
+               return I40E_ERR_RESET_FAILED;
+       }
+
+       /* Now wait for the FW to be ready */
+       for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+               reg = rd32(hw, I40E_GLNVM_ULD);
+               reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+                       I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+               if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+                           I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+                       hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
+                       break;
+               }
+               usleep_range(10000, 20000);
+       }
+       if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+                    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+               hw_dbg(hw, "wait for FW Reset complete timedout\n");
+               hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
+               return I40E_ERR_RESET_FAILED;
+       }
+
+       /* If there was a Global Reset in progress when we got here,
+        * we don't need to do the PF Reset
+        */
+       if (!cnt) {
+               reg = rd32(hw, I40E_PFGEN_CTRL);
+               wr32(hw, I40E_PFGEN_CTRL,
+                    (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+               for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
+                       reg = rd32(hw, I40E_PFGEN_CTRL);
+                       if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+                               break;
+                       usleep_range(1000, 2000);
+               }
+               if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+                       hw_dbg(hw, "PF reset polling failed to complete.\n");
+                       return I40E_ERR_RESET_FAILED;
+               }
+       }
+
+       i40e_clear_pxe_mode(hw);
+
+       return I40E_SUCCESS;
+}
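+
+/* Timing example: with GRSTDEL = 30 the first poll above allows up to
+ * (30 + 10) * 100 ms = 4 s for the global reset to settle before giving
+ * up with I40E_ERR_RESET_FAILED.
+ */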
+
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+       u32 num_queues, base_queue;
+       u32 num_pf_int;
+       u32 num_vf_int;
+       u32 num_vfs;
+       u32 i, j;
+       u32 val;
+       u32 eol = 0x7ff;
+
+       /* get number of interrupts, queues, and vfs */
+       val = rd32(hw, I40E_GLPCI_CNF2);
+       num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+                       I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+       num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+                       I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+       val = rd32(hw, I40E_PFLAN_QALLOC);
+       base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+                       I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+       j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+                       I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+       if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+               num_queues = (j - base_queue) + 1;
+       else
+               num_queues = 0;
+
+       val = rd32(hw, I40E_PF_VT_PFALLOC);
+       i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+                       I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+       j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+                       I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+       if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+               num_vfs = (j - i) + 1;
+       else
+               num_vfs = 0;
+
+       /* stop all the interrupts */
+       wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+       val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+       for (i = 0; i < num_pf_int - 2; i++)
+               wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+       /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+       val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+       wr32(hw, I40E_PFINT_LNKLST0, val);
+       for (i = 0; i < num_pf_int - 2; i++)
+               wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+       val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+       for (i = 0; i < num_vfs; i++)
+               wr32(hw, I40E_VPINT_LNKLST0(i), val);
+       for (i = 0; i < num_vf_int - 2; i++)
+               wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+       /* warn the HW of the coming Tx disables */
+       for (i = 0; i < num_queues; i++) {
+               u32 abs_queue_idx = base_queue + i;
+               u32 reg_block = 0;
+
+               if (abs_queue_idx >= 128) {
+                       reg_block = abs_queue_idx / 128;
+                       abs_queue_idx %= 128;
+               }
+
+               val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+               val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+               val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+               val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+               wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+       }
+       udelay(400);
+
+       /* stop all the queues */
+       for (i = 0; i < num_queues; i++) {
+               wr32(hw, I40E_QINT_TQCTL(i), 0);
+               wr32(hw, I40E_QTX_ENA(i), 0);
+               wr32(hw, I40E_QINT_RQCTL(i), 0);
+               wr32(hw, I40E_QRX_ENA(i), 0);
+       }
+
+       /* short wait for all queue disables to settle */
+       udelay(50);
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+       if (i40e_check_asq_alive(hw))
+               i40e_aq_clear_pxe_mode(hw, NULL);
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ **/
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+       u32 gpio_val = 0;
+       u32 port;
+
+       if (!hw->func_caps.led[idx])
+               return 0;
+
+       gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+       port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+               I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+       /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
+        * if it is not our port then ignore
+        */
+       if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+           (port != hw->port))
+               return 0;
+
+       return gpio_val;
+}
+
+#define I40E_COMBINED_ACTIVITY 0xA
+#define I40E_FILTER_ACTIVITY 0xE
+#define I40E_LINK_ACTIVITY 0xC
+#define I40E_MAC_ACTIVITY 0xD
+#define I40E_LED0 22
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+       u32 current_mode = 0;
+       u32 mode = 0;
+       int i;
+
+       /* as per the documentation GPIO 22-29 are the LED
+        * GPIO pins named LED0..LED7
+        */
+       for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+               u32 gpio_val = i40e_led_is_mine(hw, i);
+
+               if (!gpio_val)
+                       continue;
+
+               /* ignore gpio LED src mode entries related to the activity
+                *  LEDs
+                */
+               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+                               >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+               switch (current_mode) {
+               case I40E_COMBINED_ACTIVITY:
+               case I40E_FILTER_ACTIVITY:
+               case I40E_MAC_ACTIVITY:
+                       continue;
+               default:
+                       break;
+               }
+
+               mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+                       I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+               break;
+       }
+
+       return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * if this function is used to turn on the blink it should
+ * be used to disable the blink when restoring the original state.
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
+{
+       u32 current_mode = 0;
+       int i;
+
+       if (mode & 0xfffffff0)
+               hw_dbg(hw, "invalid mode passed in %X\n", mode);
+
+       /* as per the documentation GPIO 22-29 are the LED
+        * GPIO pins named LED0..LED7
+        */
+       for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+               u32 gpio_val = i40e_led_is_mine(hw, i);
+
+               if (!gpio_val)
+                       continue;
+
+               /* ignore gpio LED src mode entries related to the activity
+                * LEDs
+                */
+               current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+                               >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+               switch (current_mode) {
+               case I40E_COMBINED_ACTIVITY:
+               case I40E_FILTER_ACTIVITY:
+               case I40E_MAC_ACTIVITY:
+                       continue;
+               default:
+                       break;
+               }
+
+               gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+               /* this & is a bit of paranoia, but serves as a range check */
+               gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+                            I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+               if (mode == I40E_LINK_ACTIVITY)
+                       blink = false;
+
+               if (blink)
+                       gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+               else
+                       gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+
+               wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+               break;
+       }
+}
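+
+/* Usage sketch (illustrative only): the identify-port pattern saves the
+ * current mode, blinks the LED, then restores the original steady state.
+ * "hw" is assumed to be an initialized struct i40e_hw:
+ *
+ *        u32 orig_mode = i40e_led_get(hw);
+ *
+ *        i40e_led_set(hw, 0xf, true);        // LED on, blinking
+ *        // ... wait while the user locates the port ...
+ *        i40e_led_set(hw, orig_mode, false); // restore original steady mode
+ */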
+
+/* Admin command wrappers */
+
+/**
+ * i40e_aq_get_phy_capabilities
+ * @hw: pointer to the hw struct
+ * @qualified_modules: report Qualified Modules
+ * @report_init: report init capabilities (active are default)
+ * @abilities: structure for PHY capabilities to be filled
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the various PHY abilities supported on the Port.
+ **/
+i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+                       bool qualified_modules, bool report_init,
+                       struct i40e_aq_get_phy_abilities_resp *abilities,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+       u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+
+       if (!abilities)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_get_phy_abilities);
+
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       if (abilities_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       if (qualified_modules)
+               desc.params.external.param0 |=
+                       CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+
+       if (report_init)
+               desc.params.external.param0 |=
+                       CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+
+       status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
+                                   cmd_details);
+
+       if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
+               status = I40E_ERR_UNKNOWN_PHY;
+
+       if (report_init)
+               hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_phy_config
+ * @hw: pointer to the hw struct
+ * @config: structure with PHY configuration to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters
+ * supported on the Port. One or more of the Set PHY config parameters may be
+ * ignored in an MFP mode as the PF may not have the privilege to set some
+ * of the PHY Config parameters. This status will be indicated by the
+ * command response.
+ **/
+i40e_status i40e_aq_set_phy_config(struct i40e_hw *hw,
+                               struct i40e_aq_set_phy_config *config,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aq_set_phy_config *cmd =
+               (struct i40e_aq_set_phy_config *)&desc.params.raw;
+       i40e_status status;
+
+       if (!config)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_phy_config);
+
+       *cmd = *config;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to automatically restart the link on change
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+i40e_status i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+                                 bool atomic_restart)
+{
+       enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       struct i40e_aq_set_phy_config config;
+       i40e_status status;
+       u8 pause_mask = 0x0;
+
+       *aq_failures = 0x0;
+
+       switch (fc_mode) {
+       case I40E_FC_FULL:
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+               break;
+       case I40E_FC_RX_PAUSE:
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+               break;
+       case I40E_FC_TX_PAUSE:
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+               break;
+       default:
+               break;
+       }
+
+       /* Get the current phy config */
+       status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+                                             NULL);
+       if (status) {
+               *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
+               return status;
+       }
+
+       memset(&config, 0, sizeof(config));
+       /* clear the old pause settings */
+       config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+                          ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+       /* set the new abilities */
+       config.abilities |= pause_mask;
+       /* If the abilities have changed, then set the new config */
+       if (config.abilities != abilities.abilities) {
+               /* Auto restart link so settings take effect */
+               if (atomic_restart)
+                       config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+               /* Copy over all the old settings */
+               config.phy_type = abilities.phy_type;
+               config.link_speed = abilities.link_speed;
+               config.eee_capability = abilities.eee_capability;
+               config.eeer = abilities.eeer_val;
+               config.low_power_ctrl = abilities.d3_lpan;
+               status = i40e_aq_set_phy_config(hw, &config, NULL);
+
+               if (status)
+                       *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+       }
+       /* Update the link info */
+       status = i40e_update_link_info(hw);
+       if (status) {
+               /* Wait a little bit (on 40G cards it sometimes takes a really
+                * long time for link to come back from the atomic reset)
+                * and try once more
+                */
+               msleep(1000);
+               status = i40e_update_link_info(hw);
+       }
+       if (status)
+               *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
+
+       return status;
+}
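+
+/* Usage sketch (illustrative only): the requested mode is staged in
+ * hw->fc.requested_mode before the call, and aq_failures tells the caller
+ * which AdminQ step failed:
+ *
+ *        u8 aq_failures = 0;
+ *        i40e_status ret;
+ *
+ *        hw->fc.requested_mode = I40E_FC_FULL;
+ *        ret = i40e_set_fc(hw, &aq_failures, true);
+ *        if (ret && (aq_failures & I40E_SET_FC_AQ_FAIL_SET))
+ *                ; // the Set PHY config step itself was rejected
+ */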
+
+/**
+ * i40e_aq_set_mac_config
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @crc_en: Tell HW to append a CRC to outgoing frames
+ * @pacing: Pacing configurations
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Configure MAC settings for frame size, jumbo frame support and the
+ * addition of a CRC by the hardware.
+ **/
+i40e_status i40e_aq_set_mac_config(struct i40e_hw *hw,
+                               u16 max_frame_size,
+                               bool crc_en, u16 pacing,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aq_set_mac_config *cmd =
+               (struct i40e_aq_set_mac_config *)&desc.params.raw;
+       i40e_status status;
+
+       if (max_frame_size == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_mac_config);
+
+       cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
+       cmd->params = ((u8)pacing & 0x0F) << 3;
+       if (crc_en)
+               cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
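+
+/* Worked example (illustrative only) of the params encoding above: pacing
+ * occupies bits 6:3 and CRC_EN is a separate flag bit, so pacing = 0x5
+ * with crc_en = true yields:
+ *
+ *        (0x5 & 0x0F) << 3  ->  0x28
+ *        0x28 | I40E_AQ_SET_MAC_CONFIG_CRC_EN
+ */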
+
+/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_clear_pxe *cmd =
+               (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_clear_pxe_mode);
+
+       cmd->rx_cnt = 0x2;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @enable_link: if true: enable link, if false: disable link
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+               bool enable_link, struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_link_restart_an *cmd =
+               (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_link_restart_an);
+
+       cmd->command = I40E_AQ_PHY_RESTART_AN;
+       if (enable_link)
+               cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
+       else
+               cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+                               bool enable_lse, struct i40e_link_status *link,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_link_status *resp =
+               (struct i40e_aqc_get_link_status *)&desc.params.raw;
+       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+       i40e_status status;
+       bool tx_pause, rx_pause;
+       u16 command_flags;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+       if (enable_lse)
+               command_flags = I40E_AQ_LSE_ENABLE;
+       else
+               command_flags = I40E_AQ_LSE_DISABLE;
+       resp->command_flags = CPU_TO_LE16(command_flags);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (status != I40E_SUCCESS)
+               goto aq_get_link_info_exit;
+
+       /* save off old link status information */
+       i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
+                   sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA);
+
+       /* update link status */
+       hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+       hw->phy.media_type = i40e_get_media_type(hw);
+       hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+       hw_link_info->link_info = resp->link_info;
+       hw_link_info->an_info = resp->an_info;
+       hw_link_info->ext_info = resp->ext_info;
+       hw_link_info->loopback = resp->loopback;
+       hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
+       hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+       /* update fc info */
+       tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
+       rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
+       if (tx_pause && rx_pause)
+               hw->fc.current_mode = I40E_FC_FULL;
+       else if (tx_pause)
+               hw->fc.current_mode = I40E_FC_TX_PAUSE;
+       else if (rx_pause)
+               hw->fc.current_mode = I40E_FC_RX_PAUSE;
+       else
+               hw->fc.current_mode = I40E_FC_NONE;
+
+       if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+               hw_link_info->crc_enable = true;
+       else
+               hw_link_info->crc_enable = false;
+
+       if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+               hw_link_info->lse_enable = true;
+       else
+               hw_link_info->lse_enable = false;
+
+       if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+            hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+               hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
+       /* save link status information */
+       if (link)
+               i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
+                           I40E_NONDMA_TO_NONDMA);
+
+       /* flag cleared so helper functions don't call AQ again */
+       hw->phy.get_link_info = false;
+
+aq_get_link_info_exit:
+       return status;
+}
+
+/**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+                               u16 mask,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_phy_int_mask *cmd =
+               (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_phy_int_mask);
+
+       cmd->event_mask = CPU_TO_LE16(mask);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the Local AN advertisement register value.
+ **/
+i40e_status i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+                               u64 *advt_reg,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_an_advt_reg *resp =
+               (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_get_local_advt_reg);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (status != I40E_SUCCESS)
+               goto aq_get_local_advt_reg_exit;
+
+       *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+       *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_local_advt_reg_exit:
+       return status;
+}
+
+/**
+ * i40e_aq_set_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the Local AN advertisement register value.
+ **/
+i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+                               u64 advt_reg,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_an_advt_reg *cmd =
+               (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_local_advt_reg);
+
+       cmd->local_an_reg0 = CPU_TO_LE32(lower_32_bits(advt_reg));
+       cmd->local_an_reg1 = CPU_TO_LE16(upper_32_bits(advt_reg));
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_partner_advt
+ * @hw: pointer to the hw struct
+ * @advt_reg: AN partner advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the link partner AN advertisement register value.
+ **/
+i40e_status i40e_aq_get_partner_advt(struct i40e_hw *hw,
+                               u64 *advt_reg,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_an_advt_reg *resp =
+               (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_get_partner_advt);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (status != I40E_SUCCESS)
+               goto aq_get_partner_advt_exit;
+
+       *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+       *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_partner_advt_exit:
+       return status;
+}
+
+/**
+ * i40e_aq_set_lb_modes
+ * @hw: pointer to the hw struct
+ * @lb_modes: loopback mode to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets loopback modes.
+ **/
+i40e_status i40e_aq_set_lb_modes(struct i40e_hw *hw,
+                               u16 lb_modes,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_lb_mode *cmd =
+               (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_lb_modes);
+
+       cmd->lb_mode = CPU_TO_LE16(lb_modes);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_phy_debug
+ * @hw: pointer to the hw struct
+ * @cmd_flags: debug command flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set PHY debug flags, e.g. to request an external PHY reset.
+ **/
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_phy_debug *cmd =
+               (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_phy_debug);
+
+       cmd->command_flags = cmd_flags;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+ **/
+i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+                               struct i40e_vsi_context *vsi_ctx,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_get_update_vsi *cmd =
+               (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+       struct i40e_aqc_add_get_update_vsi_completion *resp =
+               (struct i40e_aqc_add_get_update_vsi_completion *)
+               &desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_add_vsi);
+
+       cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid);
+       cmd->connection_type = vsi_ctx->connection_type;
+       cmd->vf_id = vsi_ctx->vf_num;
+       cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
+
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+       status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+                                   sizeof(vsi_ctx->info), cmd_details);
+
+       if (status != I40E_SUCCESS)
+               goto aq_add_vsi_exit;
+
+       vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+       vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+       vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+       vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_add_vsi_exit:
+       return status;
+}
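+
+/* Usage sketch (illustrative only): the caller fills the uplink and
+ * connection fields of a zeroed context and reads the assigned SEID back
+ * on success ("uplink" assumed to be a valid switch SEID):
+ *
+ *        struct i40e_vsi_context ctx = {0};
+ *
+ *        ctx.uplink_seid = uplink;
+ *        ctx.connection_type = 0x1;        // regular data port
+ *        ctx.flags = I40E_AQ_VSI_TYPE_PF;
+ *        if (i40e_aq_add_vsi(hw, &ctx, NULL) == I40E_SUCCESS)
+ *                new_seid = ctx.seid;
+ */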
+
+/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
+                               u16 seid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)
+               &desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+       cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+       cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+       cmd->seid = CPU_TO_LE16(seid);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+                               u16 seid, bool set,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+       i40e_status status;
+       u16 flags = 0;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+       if (set)
+               flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+       cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+       cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+
+       cmd->seid = CPU_TO_LE16(seid);
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+                               u16 seid, bool set,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+       i40e_status status;
+       u16 flags = 0;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+       if (set)
+               flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+       cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+       cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+       cmd->seid = CPU_TO_LE16(seid);
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
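+
+/* Usage sketch (illustrative only): entering full promiscuous mode on a
+ * VSI is the two calls back to back, each setting one valid_flags bit:
+ *
+ *        i40e_aq_set_vsi_unicast_promiscuous(hw, vsi_seid, true, NULL);
+ *        i40e_aq_set_vsi_multicast_promiscuous(hw, vsi_seid, true, NULL);
+ */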
+
+/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+                               u16 seid, bool enable, u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+       i40e_status status;
+       u16 flags = 0;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+       if (enable)
+               flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+       cmd->promiscuous_flags = CPU_TO_LE16(flags);
+       cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+       cmd->seid = CPU_TO_LE16(seid);
+       cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+                               u16 seid, bool enable, u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+       i40e_status status;
+       u16 flags = 0;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+       if (enable)
+               flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+       cmd->promiscuous_flags = CPU_TO_LE16(flags);
+       cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+       cmd->seid = CPU_TO_LE16(seid);
+       cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: true to set filter, false to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+                               u16 seid, bool set_filter,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+               (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+       if (set_filter)
+               cmd->promiscuous_flags
+                           |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+       else
+               cmd->promiscuous_flags
+                           &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+       cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+       cmd->seid = CPU_TO_LE16(seid);
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+                               struct i40e_vsi_context *vsi_ctx,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_get_update_vsi *cmd =
+               (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+       struct i40e_aqc_add_get_update_vsi_completion *resp =
+               (struct i40e_aqc_add_get_update_vsi_completion *)
+               &desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_get_vsi_parameters);
+
+       cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+       status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+                                   sizeof(vsi_ctx->info), cmd_details);
+
+       if (status != I40E_SUCCESS)
+               goto aq_get_vsi_params_exit;
+
+       vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+       vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+       vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+       vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+       return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+                               struct i40e_vsi_context *vsi_ctx,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_get_update_vsi *cmd =
+               (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_update_vsi_parameters);
+       cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+       status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+                                   sizeof(vsi_ctx->info), cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of input buffer
+ * @start_seid: seid to start for the report, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+                               struct i40e_aqc_get_switch_config_resp *buf,
+                               u16 buf_size, u16 *start_seid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_switch_seid *scfg =
+               (struct i40e_aqc_switch_seid *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_get_switch_config);
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       if (buf_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+       scfg->seid = CPU_TO_LE16(*start_seid);
+
+       status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+       *start_seid = LE16_TO_CPU(scfg->seid);
+
+       return status;
+}
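+
+/* Usage sketch (illustrative only): start_seid is both input and output;
+ * the FW updates it to the next SEID to fetch and reports 0 once the whole
+ * configuration has been returned, so callers page through it in a loop
+ * ("buf" and "len" assumed to describe an allocated buffer):
+ *
+ *        u16 next_seid = 0;
+ *
+ *        do {
+ *                status = i40e_aq_get_switch_config(hw, buf, len,
+ *                                                   &next_seid, NULL);
+ *                if (status)
+ *                        break;
+ *                // ... walk the returned switch elements in buf ...
+ *        } while (next_seid != 0);
+ */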
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
+ * @api_major_version: admin queue API major version
+ * @api_minor_version: admin queue API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+                               u16 *fw_major_version, u16 *fw_minor_version,
+                               u32 *fw_build,
+                               u16 *api_major_version, u16 *api_minor_version,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_version *resp =
+               (struct i40e_aqc_get_version *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (status == I40E_SUCCESS) {
+               if (fw_major_version != NULL)
+                       *fw_major_version = LE16_TO_CPU(resp->fw_major);
+               if (fw_minor_version != NULL)
+                       *fw_minor_version = LE16_TO_CPU(resp->fw_minor);
+               if (fw_build != NULL)
+                       *fw_build = LE32_TO_CPU(resp->fw_build);
+               if (api_major_version != NULL)
+                       *api_major_version = LE16_TO_CPU(resp->api_major);
+               if (api_minor_version != NULL)
+                       *api_minor_version = LE16_TO_CPU(resp->api_minor);
+
+               /* A workaround to fix the API version in SW */
+               if (api_major_version && api_minor_version &&
+                   fw_major_version && fw_minor_version &&
+                   ((*api_major_version == 1) && (*api_minor_version == 1)) &&
+                   (((*fw_major_version == 4) && (*fw_minor_version >= 2)) ||
+                    (*fw_major_version > 4)))
+                       *api_minor_version = 2;
+       }
+
+       return status;
+}
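+
+/* Usage sketch (illustrative only): all output pointers are optional, so
+ * a caller interested only in the firmware version can pass NULL for the
+ * API fields:
+ *
+ *        u16 fw_maj, fw_min;
+ *        u32 fw_build;
+ *
+ *        if (!i40e_aq_get_firmware_version(hw, &fw_maj, &fw_min, &fw_build,
+ *                                          NULL, NULL, NULL))
+ *                ; // firmware is fw_maj.fw_min, build fw_build
+ */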
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+                               struct i40e_driver_version *dv,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_driver_version *cmd =
+               (struct i40e_aqc_driver_version *)&desc.params.raw;
+       i40e_status status;
+       u16 len;
+
+       if (dv == NULL)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+       desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+       cmd->driver_major_ver = dv->major_version;
+       cmd->driver_minor_ver = dv->minor_version;
+       cmd->driver_build_ver = dv->build_version;
+       cmd->driver_subbuild_ver = dv->subbuild_version;
+
+       len = 0;
+       while (len < sizeof(dv->driver_string) &&
+              (dv->driver_string[len] < 0x80) &&
+              dv->driver_string[len])
+               len++;
+       status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+                                      len, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * Sets link_up to true if the link is up, false if it is down.
+ * The value of link_up is invalid if the returned status != I40E_SUCCESS.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+{
+       i40e_status status = I40E_SUCCESS;
+
+       if (hw->phy.get_link_info) {
+               status = i40e_update_link_info(hw);
+
+               if (status != I40E_SUCCESS)
+                       i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+                                  status);
+       }
+
+       *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+       return status;
+}
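+
+/* Usage sketch (illustrative only): link_up is only meaningful when the
+ * call itself succeeds:
+ *
+ *        bool link_up = false;
+ *
+ *        if (i40e_get_link_status(hw, &link_up) == I40E_SUCCESS && link_up)
+ *                ; // safe to report carrier on
+ */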
+
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+i40e_status i40e_update_link_info(struct i40e_hw *hw)
+{
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       i40e_status status = I40E_SUCCESS;
+
+       status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+       if (status)
+               return status;
+
+       if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+               status = i40e_aq_get_phy_capabilities(hw, false, false,
+                                                     &abilities, NULL);
+               if (status)
+                       return status;
+
+               memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+                       sizeof(hw->phy.link_info.module_type));
+       }
+
+       return status;
+}
+
+/**
+ * i40e_get_link_speed
+ * @hw: pointer to the hw struct
+ *
+ * Returns the link speed of the adapter.
+ **/
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw)
+{
+       enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
+       i40e_status status = I40E_SUCCESS;
+
+       if (hw->phy.get_link_info) {
+               status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+
+               if (status != I40E_SUCCESS)
+                       goto i40e_link_speed_exit;
+       }
+
+       speed = hw->phy.link_info.link_speed;
+
+i40e_link_speed_exit:
+       return speed;
+}
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: true for default port VSI, false for control port
+ * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements.  If the uplink SEID is 0, this will be a floating VEB.
+ **/
+i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+                               u16 downlink_seid, u8 enabled_tc,
+                               bool default_port, bool enable_l2_filtering,
+                               u16 *veb_seid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_veb *cmd =
+               (struct i40e_aqc_add_veb *)&desc.params.raw;
+       struct i40e_aqc_add_veb_completion *resp =
+               (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+       i40e_status status;
+       u16 veb_flags = 0;
+
+       /* SEIDs need to either both be set or both be 0 for floating VEB */
+       if (!!uplink_seid != !!downlink_seid)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+       cmd->uplink_seid = CPU_TO_LE16(uplink_seid);
+       cmd->downlink_seid = CPU_TO_LE16(downlink_seid);
+       cmd->enable_tcs = enabled_tc;
+       if (!uplink_seid)
+               veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+       if (default_port)
+               veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+       else
+               veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+       if (enable_l2_filtering)
+               veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
+       cmd->veb_flags = CPU_TO_LE16(veb_flags);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status && veb_seid)
+               *veb_seid = LE16_TO_CPU(resp->veb_seid);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to true if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by this function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * veb_seid, and returns them to the caller.
+ **/
+i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+                               u16 veb_seid, u16 *switch_id,
+                               bool *floating, u16 *statistic_index,
+                               u16 *vebs_used, u16 *vebs_free,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+               (struct i40e_aqc_get_veb_parameters_completion *)
+               &desc.params.raw;
+       i40e_status status;
+
+       if (veb_seid == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_get_veb_parameters);
+       cmd_resp->seid = CPU_TO_LE16(veb_seid);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+       if (status)
+               goto get_veb_exit;
+
+       if (switch_id)
+               *switch_id = LE16_TO_CPU(cmd_resp->switch_id);
+       if (statistic_index)
+               *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index);
+       if (vebs_used)
+               *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used);
+       if (vebs_free)
+               *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
+       if (floating) {
+               u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
+
+               if (flags & I40E_AQC_ADD_VEB_FLOATING)
+                       *floating = true;
+               else
+                       *floating = false;
+       }
+
+get_veb_exit:
+       return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+                       struct i40e_aqc_add_macvlan_element_data *mv_list,
+                       u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_macvlan *cmd =
+               (struct i40e_aqc_macvlan *)&desc.params.raw;
+       i40e_status status;
+       u16 buf_size;
+
+       if (count == 0 || !mv_list || !hw)
+               return I40E_ERR_PARAM;
+
+       buf_size = count * sizeof(*mv_list);
+
+       /* prep the rest of the request */
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+       cmd->num_addresses = CPU_TO_LE16(count);
+       cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+       cmd->seid[1] = 0;
+       cmd->seid[2] = 0;
+
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (buf_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+                                   cmd_details);
+
+       return status;
+}
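+
+/* Usage sketch (illustrative only): a single perfect-match filter for one
+ * MAC address, matching any VLAN ("mac" assumed to be a 6-byte array):
+ *
+ *        struct i40e_aqc_add_macvlan_element_data elem = {0};
+ *
+ *        memcpy(elem.mac_addr, mac, sizeof(elem.mac_addr));
+ *        elem.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ *                                 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+ *        status = i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
+ */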
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+                       struct i40e_aqc_remove_macvlan_element_data *mv_list,
+                       u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_macvlan *cmd =
+               (struct i40e_aqc_macvlan *)&desc.params.raw;
+       i40e_status status;
+       u16 buf_size;
+
+       if (count == 0 || !mv_list || !hw)
+               return I40E_ERR_PARAM;
+
+       buf_size = count * sizeof(*mv_list);
+
+       /* prep the rest of the request */
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+       cmd->num_addresses = CPU_TO_LE16(count);
+       cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+       cmd->seid[1] = 0;
+       cmd->seid[2] = 0;
+
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (buf_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+                                      cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+                       struct i40e_aqc_add_remove_vlan_element_data *v_list,
+                       u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_macvlan *cmd =
+               (struct i40e_aqc_macvlan *)&desc.params.raw;
+       i40e_status status;
+       u16 buf_size;
+
+       if (count == 0 || !v_list || !hw)
+               return I40E_ERR_PARAM;
+
+       buf_size = count * sizeof(*v_list);
+
+       /* prep the rest of the request */
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+       cmd->num_addresses = CPU_TO_LE16(count);
+       cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+       cmd->seid[1] = 0;
+       cmd->seid[2] = 0;
+
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (buf_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+                                      cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+                       struct i40e_aqc_add_remove_vlan_element_data *v_list,
+                       u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_macvlan *cmd =
+               (struct i40e_aqc_macvlan *)&desc.params.raw;
+       i40e_status status;
+       u16 buf_size;
+
+       if (count == 0 || !v_list || !hw)
+               return I40E_ERR_PARAM;
+
+       buf_size = count * sizeof(*v_list);
+
+       /* prep the rest of the request */
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+       cmd->num_addresses = CPU_TO_LE16(count);
+       cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+       cmd->seid[1] = 0;
+       cmd->seid[2] = 0;
+
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (buf_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+                                      cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: vf id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * send msg to vf
+ **/
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+                               u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_pf_vf_message *cmd =
+               (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+       cmd->id = CPU_TO_LE32(vfid);
+       desc.cookie_high = CPU_TO_LE32(v_opcode);
+       desc.cookie_low = CPU_TO_LE32(v_retval);
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+       if (msglen) {
+               desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+                                               I40E_AQ_FLAG_RD));
+               if (msglen > I40E_AQ_LARGE_BUF)
+                       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+               desc.datalen = CPU_TO_LE16(msglen);
+       }
+       status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+                               u32 reg_addr, u64 *reg_val,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_debug_reg_read_write *cmd_resp =
+               (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+       i40e_status status;
+
+       if (reg_val == NULL)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+
+       cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (status == I40E_SUCCESS) {
+               *reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) |
+                          (u64)LE32_TO_CPU(cmd_resp->value_low);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_debug_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write to a register using the admin queue commands
+ **/
+i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
+                               u32 reg_addr, u64 reg_val,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_debug_reg_read_write *cmd =
+               (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+
+       cmd->address = CPU_TO_LE32(reg_addr);
+       cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32));
+       cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF));
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
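+
+/* Usage sketch (illustrative only): together with
+ * i40e_aq_debug_read_register() above this gives a firmware-mediated
+ * read-modify-write, e.g. setting bit 3 of an arbitrary register:
+ *
+ *        u64 val = 0;
+ *
+ *        if (!i40e_aq_debug_read_register(hw, reg_addr, &val, NULL))
+ *                i40e_aq_debug_write_register(hw, reg_addr,
+ *                                             val | BIT(3), NULL);
+ */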
+
+/**
+ * i40e_aq_get_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: buffer to return the current HMC profile
+ * @pe_vf_enabled_count: buffer to return the number of PE-enabled VFs
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Query the HMC profile of the device.
+ **/
+i40e_status i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
+                               enum i40e_aq_hmc_profile *profile,
+                               u8 *pe_vf_enabled_count,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aq_get_set_hmc_resource_profile *resp =
+               (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                               i40e_aqc_opc_query_hmc_resource_profile);
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       /* only trust the response fields when the command succeeded */
+       if (status == I40E_SUCCESS) {
+               *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile &
+                               I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK);
+               *pe_vf_enabled_count = resp->pe_vf_enabled &
+                               I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK;
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: type of profile the HMC is to be set as
+ * @pe_vf_enabled_count: the number of PE enabled VFs the system has
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the HMC profile of the device.
+ **/
+i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+                               enum i40e_aq_hmc_profile profile,
+                               u8 pe_vf_enabled_count,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aq_get_set_hmc_resource_profile *cmd =
+               (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_set_hmc_resource_profile);
+
+       cmd->pm_profile = (u8)profile;
+       cmd->pe_vf_enabled = pe_vf_enabled_count;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * requests common resource using the admin queue commands
+ **/
+i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+                               enum i40e_aq_resources_ids resource,
+                               enum i40e_aq_resource_access_type access,
+                               u8 sdp_number, u64 *timeout,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_request_resource *cmd_resp =
+               (struct i40e_aqc_request_resource *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+       cmd_resp->resource_id = CPU_TO_LE16(resource);
+       cmd_resp->access_type = CPU_TO_LE16(access);
+       cmd_resp->resource_number = CPU_TO_LE32(sdp_number);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+       /* The completion specifies the maximum time in ms that the driver
+        * may hold the resource in the Timeout field.
+        * If the resource is held by someone else, the command completes with
+        * busy return value and the timeout field indicates the maximum time
+        * the current owner of the resource has to free it.
+        */
+       if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+               *timeout = LE32_TO_CPU(cmd_resp->timeout);
+
+       return status;
+}
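
A minimal sketch of the acquire/use/release pattern these two helpers imply; the helper name is hypothetical, and I40E_NVM_RESOURCE_ID / I40E_RESOURCE_READ are assumed to be the resource-id and access-type enums this driver defines in i40e_type.h:

static i40e_status example_with_nvm_resource(struct i40e_hw *hw)
{
        u64 timeout = 0;
        i40e_status status;

        status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
                                          I40E_RESOURCE_READ, 0, &timeout,
                                          NULL);
        if (status != I40E_SUCCESS)
                return status;  /* on EBUSY, timeout bounds the wait */

        /* ... access the NVM while holding the resource ... */

        return i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
}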
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands
+ **/
+i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+                               enum i40e_aq_resources_ids resource,
+                               u8 sdp_number,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_request_resource *cmd =
+               (struct i40e_aqc_request_resource *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+       cmd->resource_id = CPU_TO_LE16(resource);
+       cmd->resource_number = CPU_TO_LE32(sdp_number);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+                               u32 offset, u16 length, void *data,
+                               bool last_command,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_nvm_update *cmd =
+               (struct i40e_aqc_nvm_update *)&desc.params.raw;
+       i40e_status status;
+
+       /* In offset the highest byte must be zeroed. */
+       if (offset & 0xFF000000) {
+               status = I40E_ERR_PARAM;
+               goto i40e_aq_read_nvm_exit;
+       }
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+       /* If this is the last command in a series, set the proper flag. */
+       if (last_command)
+               cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+       cmd->module_pointer = module_pointer;
+       cmd->offset = CPU_TO_LE32(offset);
+       cmd->length = CPU_TO_LE16(length);
+
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       if (length > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+       return status;
+}
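
A minimal sketch of a chunked read driven by the last_command flag; the helper name and the 4 KB chunk size are illustrative, and min_t is the standard kernel macro:

static i40e_status example_read_nvm_module(struct i40e_hw *hw, u8 module,
                                           u32 len, u8 *data)
{
        i40e_status status = I40E_SUCCESS;
        u32 offset = 0;

        while (offset < len) {
                u16 chunk = (u16)min_t(u32, len - offset, 4096);
                bool last = (offset + chunk >= len);

                status = i40e_aq_read_nvm(hw, module, offset, chunk,
                                          data + offset, last, NULL);
                if (status != I40E_SUCCESS)
                        break;
                offset += chunk;
        }

        return status;
}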
+
+/**
+ * i40e_aq_read_nvm_config - read an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature id
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: pointer to count of elements read by FW
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_read_nvm_config(struct i40e_hw *hw,
+                               u8 cmd_flags, u32 field_id, void *data,
+                               u16 buf_size, u16 *element_count,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_nvm_config_read *cmd =
+               (struct i40e_aqc_nvm_config_read *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read);
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF));
+       if (buf_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+       cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id));
+       if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK)
+               cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16));
+       else
+               cmd->element_id_msw = 0;
+
+       status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+       if (!status && element_count)
+               *element_count = LE16_TO_CPU(cmd->element_count);
+
+       return status;
+}
+
+/**
+ * i40e_aq_write_nvm_config - write an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: count of elements to be written
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_write_nvm_config(struct i40e_hw *hw,
+                               u8 cmd_flags, void *data, u16 buf_size,
+                               u16 element_count,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_nvm_config_write *cmd =
+               (struct i40e_aqc_nvm_config_write *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write);
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (buf_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       cmd->element_count = CPU_TO_LE16(element_count);
+       cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+       status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_oem_post_update - triggers an OEM specific flow after update
+ * @hw: pointer to the hw struct
+ * @buff: optional buffer for the command (not used; the command is sent
+ *        without an indirect buffer)
+ * @buff_size: size of the buffer, in bytes (not used)
+ * @cmd_details: pointer to command details structure or NULL
+i40e_status i40e_aq_oem_post_update(struct i40e_hw *hw,
+                               void *buff, u16 buff_size,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+       if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
+               status = I40E_ERR_NOT_IMPLEMENTED;
+
+       return status;
+}
+
+/**
+ * i40e_aq_erase_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in the module (expressed in 4 KB from module's beginning)
+ * @length: length of the section to be erased (expressed in 4 KB)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
+ **/
+i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+                               u32 offset, u16 length, bool last_command,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_nvm_update *cmd =
+               (struct i40e_aqc_nvm_update *)&desc.params.raw;
+       i40e_status status;
+
+       /* In offset the highest byte must be zeroed. */
+       if (offset & 0xFF000000) {
+               status = I40E_ERR_PARAM;
+               goto i40e_aq_erase_nvm_exit;
+       }
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+
+       /* If this is the last command in a series, set the proper flag. */
+       if (last_command)
+               cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+       cmd->module_pointer = module_pointer;
+       cmd->offset = CPU_TO_LE32(offset);
+       cmd->length = CPU_TO_LE16(length);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_erase_nvm_exit:
+       return status;
+}
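
Since offset and length are both expressed in 4 KB sectors, a caller converts byte values first; a minimal, hypothetical sketch:

static i40e_status example_erase_64k(struct i40e_hw *hw, u8 module)
{
        /* erase 64 KB starting at byte offset 64 KB, in 4 KB sectors */
        u32 offset_4k = 0x10000 / 4096;         /* = 16 */
        u16 length_4k = 0x10000 / 4096;         /* = 16 */

        return i40e_aq_erase_nvm(hw, module, offset_4k, length_4k, true, NULL);
}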
+
+#define I40E_DEV_FUNC_CAP_SWITCH_MODE           0x01
+#define I40E_DEV_FUNC_CAP_MGMT_MODE             0x02
+#define I40E_DEV_FUNC_CAP_NPAR                  0x03
+#define I40E_DEV_FUNC_CAP_OS2BMC                0x04
+#define I40E_DEV_FUNC_CAP_VALID_FUNC            0x05
+#define I40E_DEV_FUNC_CAP_SRIOV_1_1             0x12
+#define I40E_DEV_FUNC_CAP_VF                    0x13
+#define I40E_DEV_FUNC_CAP_VMDQ                  0x14
+#define I40E_DEV_FUNC_CAP_802_1_QBG             0x15
+#define I40E_DEV_FUNC_CAP_802_1_QBH             0x16
+#define I40E_DEV_FUNC_CAP_VSI                   0x17
+#define I40E_DEV_FUNC_CAP_DCB                   0x18
+#define I40E_DEV_FUNC_CAP_FCOE                  0x21
+#define I40E_DEV_FUNC_CAP_ISCSI                 0x22
+#define I40E_DEV_FUNC_CAP_RSS                   0x40
+#define I40E_DEV_FUNC_CAP_RX_QUEUES             0x41
+#define I40E_DEV_FUNC_CAP_TX_QUEUES             0x42
+#define I40E_DEV_FUNC_CAP_MSIX                  0x43
+#define I40E_DEV_FUNC_CAP_MSIX_VF               0x44
+#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR         0x45
+#define I40E_DEV_FUNC_CAP_IEEE_1588             0x46
+#define I40E_DEV_FUNC_CAP_FLEX10                0xF1
+#define I40E_DEV_FUNC_CAP_CEM                   0xF2
+#define I40E_DEV_FUNC_CAP_IWARP                 0x51
+#define I40E_DEV_FUNC_CAP_LED                   0x61
+#define I40E_DEV_FUNC_CAP_SDP                   0x62
+#define I40E_DEV_FUNC_CAP_MDIO                  0x63
+#define I40E_DEV_FUNC_CAP_WR_CSR_PROT           0x64
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+                                    u32 cap_count,
+                                    enum i40e_admin_queue_opc list_type_opc)
+{
+       struct i40e_aqc_list_capabilities_element_resp *cap;
+       u32 valid_functions, num_functions;
+       u32 number, logical_id, phys_id;
+       struct i40e_hw_capabilities *p;
+       u8 major_rev;
+       u32 i = 0;
+       u16 id;
+
+       cap = (struct i40e_aqc_list_capabilities_element_resp *)buff;
+
+       if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+               p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+       else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+               p = (struct i40e_hw_capabilities *)&hw->func_caps;
+       else
+               return;
+
+       for (i = 0; i < cap_count; i++, cap++) {
+               id = LE16_TO_CPU(cap->id);
+               number = LE32_TO_CPU(cap->number);
+               logical_id = LE32_TO_CPU(cap->logical_id);
+               phys_id = LE32_TO_CPU(cap->phys_id);
+               major_rev = cap->major_rev;
+
+               switch (id) {
+               case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+                       p->switch_mode = number;
+                       break;
+               case I40E_DEV_FUNC_CAP_MGMT_MODE:
+                       p->management_mode = number;
+                       break;
+               case I40E_DEV_FUNC_CAP_NPAR:
+                       p->npar_enable = number;
+                       break;
+               case I40E_DEV_FUNC_CAP_OS2BMC:
+                       p->os2bmc = number;
+                       break;
+               case I40E_DEV_FUNC_CAP_VALID_FUNC:
+                       p->valid_functions = number;
+                       break;
+               case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+                       if (number == 1)
+                               p->sr_iov_1_1 = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_VF:
+                       p->num_vfs = number;
+                       p->vf_base_id = logical_id;
+                       break;
+               case I40E_DEV_FUNC_CAP_VMDQ:
+                       if (number == 1)
+                               p->vmdq = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_802_1_QBG:
+                       if (number == 1)
+                               p->evb_802_1_qbg = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_802_1_QBH:
+                       if (number == 1)
+                               p->evb_802_1_qbh = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_VSI:
+                       p->num_vsis = number;
+                       break;
+               case I40E_DEV_FUNC_CAP_DCB:
+                       if (number == 1) {
+                               p->dcb = true;
+                               p->enabled_tcmap = logical_id;
+                               p->maxtc = phys_id;
+                       }
+                       break;
+               case I40E_DEV_FUNC_CAP_FCOE:
+                       if (number == 1)
+                               p->fcoe = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_ISCSI:
+                       if (number == 1)
+                               p->iscsi = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_RSS:
+                       p->rss = true;
+                       p->rss_table_size = number;
+                       p->rss_table_entry_width = logical_id;
+                       break;
+               case I40E_DEV_FUNC_CAP_RX_QUEUES:
+                       p->num_rx_qp = number;
+                       p->base_queue = phys_id;
+                       break;
+               case I40E_DEV_FUNC_CAP_TX_QUEUES:
+                       p->num_tx_qp = number;
+                       p->base_queue = phys_id;
+                       break;
+               case I40E_DEV_FUNC_CAP_MSIX:
+                       p->num_msix_vectors = number;
+                       break;
+               case I40E_DEV_FUNC_CAP_MSIX_VF:
+                       p->num_msix_vectors_vf = number;
+                       break;
+               case I40E_DEV_FUNC_CAP_FLEX10:
+                       if (major_rev == 1) {
+                               if (number == 1) {
+                                       p->flex10_enable = true;
+                                       p->flex10_capable = true;
+                               }
+                       } else {
+                               /* Capability revision >= 2 */
+                               if (number & 1)
+                                       p->flex10_enable = true;
+                               if (number & 2)
+                                       p->flex10_capable = true;
+                       }
+                       p->flex10_mode = logical_id;
+                       p->flex10_status = phys_id;
+                       break;
+               case I40E_DEV_FUNC_CAP_CEM:
+                       if (number == 1)
+                               p->mgmt_cem = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_IWARP:
+                       if (number == 1)
+                               p->iwarp = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_LED:
+                       if (phys_id < I40E_HW_CAP_MAX_GPIO)
+                               p->led[phys_id] = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_SDP:
+                       if (phys_id < I40E_HW_CAP_MAX_GPIO)
+                               p->sdp[phys_id] = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_MDIO:
+                       if (number == 1) {
+                               p->mdio_port_num = phys_id;
+                               p->mdio_port_mode = logical_id;
+                       }
+                       break;
+               case I40E_DEV_FUNC_CAP_IEEE_1588:
+                       if (number == 1)
+                               p->ieee_1588 = true;
+                       break;
+               case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+                       p->fd = true;
+                       p->fd_filters_guaranteed = number;
+                       p->fd_filters_best_effort = logical_id;
+                       break;
+               case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+                       p->wr_csr_prot = (u64)number;
+                       p->wr_csr_prot |= (u64)logical_id << 32;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       if (p->fcoe)
+               i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
+#ifdef I40E_FCOE_ENA
+       /* Software override ensuring FCoE is disabled if npar or mfp mode
+        * is enabled, because FCoE is not supported in those modes.
+        */
+       if (p->npar_enable || p->flex10_enable)
+               p->fcoe = false;
+#else
+       /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
+       p->fcoe = false;
+#endif
+
+       /* count the enabled ports (aka the "not disabled" ports) */
+       hw->num_ports = 0;
+       for (i = 0; i < 4; i++) {
+               u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+               u64 port_cfg = 0;
+
+               /* use AQ read to get the physical register offset instead
+                * of the port relative offset
+                */
+               i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+               if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+                       hw->num_ports++;
+       }
+
+       valid_functions = p->valid_functions;
+       num_functions = 0;
+       while (valid_functions) {
+               if (valid_functions & 1)
+                       num_functions++;
+               valid_functions >>= 1;
+       }
+
+       /* partition id is 1-based, and functions are evenly spread
+        * across the ports as partitions
+        */
+       hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+       hw->num_partitions = num_functions / hw->num_ports;
+
+       /* additional HW specific goodies that might
+        * someday be HW version specific
+        */
+       p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+                               void *buff, u16 buff_size, u16 *data_size,
+                               enum i40e_admin_queue_opc list_type_opc,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aqc_list_capabilites *cmd;
+       struct i40e_aq_desc desc;
+       i40e_status status = I40E_SUCCESS;
+
+       cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+       if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+               list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+               status = I40E_ERR_PARAM;
+               goto exit;
+       }
+
+       i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       if (buff_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+       *data_size = LE16_TO_CPU(desc.datalen);
+
+       if (status)
+               goto exit;
+
+       i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count),
+                                        list_type_opc);
+
+exit:
+       return status;
+}
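
A minimal sketch of the resize-and-retry loop the @data_size description implies, assuming the kernel slab allocator is in scope; the helper name is hypothetical:

static i40e_status example_discover_caps(struct i40e_hw *hw,
                                         enum i40e_admin_queue_opc opc)
{
        u16 buff_size = 1024, data_size = 0;
        i40e_status status;
        void *buff;

        do {
                buff = kzalloc(buff_size, GFP_KERNEL);
                if (!buff)
                        return I40E_ERR_NO_MEMORY;
                status = i40e_aq_discover_capabilities(hw, buff, buff_size,
                                                       &data_size, opc, NULL);
                kfree(buff);
                if (hw->aq.asq_last_status != I40E_AQ_RC_ENOMEM)
                        break;
                /* firmware reported the size it needs; retry with it */
                buff_size = data_size;
        } while (buff_size);

        return status;
}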
+
+/**
+ * i40e_aq_update_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands
+ **/
+i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+                               u32 offset, u16 length, void *data,
+                               bool last_command,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_nvm_update *cmd =
+               (struct i40e_aqc_nvm_update *)&desc.params.raw;
+       i40e_status status;
+
+       /* In offset the highest byte must be zeroed. */
+       if (offset & 0xFF000000) {
+               status = I40E_ERR_PARAM;
+               goto i40e_aq_update_nvm_exit;
+       }
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+       /* If this is the last command in a series, set the proper flag. */
+       if (last_command)
+               cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+       cmd->module_pointer = module_pointer;
+       cmd->offset = CPU_TO_LE32(offset);
+       cmd->length = CPU_TO_LE16(length);
+
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (length > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_update_nvm_exit:
+       return status;
+}
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+                               u8 mib_type, void *buff, u16 buff_size,
+                               u16 *local_len, u16 *remote_len,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_lldp_get_mib *cmd =
+               (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+       struct i40e_aqc_lldp_get_mib *resp =
+               (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+       i40e_status status;
+
+       if (buff_size == 0 || !buff)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+       /* Indirect Command */
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+       cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+       cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+                      I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+       desc.datalen = CPU_TO_LE16(buff_size);
+
+       if (buff_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+       if (!status) {
+               if (local_len != NULL)
+                       *local_len = LE16_TO_CPU(resp->local_len);
+               if (remote_len != NULL)
+                       *remote_len = LE16_TO_CPU(resp->remote_len);
+       }
+
+       return status;
+}
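
A minimal sketch fetching only the local MIB; the helper name is hypothetical, and I40E_AQ_LLDP_MIB_LOCAL / I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE are assumed to be the type encodings from i40e_adminq_cmd.h:

static i40e_status example_get_local_mib(struct i40e_hw *hw,
                                         u8 *buff, u16 buff_size,
                                         u16 *local_len)
{
        /* remote_len may be NULL when only the local MIB is wanted */
        return i40e_aq_get_lldp_mib(hw,
                                    I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
                                    I40E_AQ_LLDP_MIB_LOCAL,
                                    buff, buff_size, local_len, NULL, NULL);
}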
+
+/**
+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the hw struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB.
+ **/
+i40e_status i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+                               u8 mib_type, void *buff, u16 buff_size,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_lldp_set_local_mib *cmd =
+               (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+       i40e_status status;
+
+       if (buff_size == 0 || !buff)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                               i40e_aqc_opc_lldp_set_local_mib);
+       /* Indirect Command */
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (buff_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+       desc.datalen = CPU_TO_LE16(buff_size);
+
+       cmd->type = mib_type;
+       cmd->length = CPU_TO_LE16(buff_size);
+       cmd->address_high = CPU_TO_LE32(upper_32_bits((u64)buff));
+       cmd->address_low = CPU_TO_LE32(lower_32_bits((u64)buff));
+
+       status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+       return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes
+ **/
+i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+                               bool enable_update,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_lldp_update_mib *cmd =
+               (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+       if (!enable_update)
+               cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_add_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to add
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be added
+ * @mib_len: length of the LLDP MIB returned in response
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add the specified TLV to LLDP Local MIB for the given bridge type,
+ * it is the responsibility of the caller to make sure that the TLV is not
+ * already present in the LLDPDU.
+ * In return firmware will write the complete LLDP MIB with the newly
+ * added TLV in the response buffer.
+ **/
+i40e_status i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+                               void *buff, u16 buff_size, u16 tlv_len,
+                               u16 *mib_len,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_lldp_add_tlv *cmd =
+               (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+       i40e_status status;
+
+       if (buff_size == 0 || !buff || tlv_len == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
+
+       /* Indirect Command */
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (buff_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+       desc.datalen = CPU_TO_LE16(buff_size);
+
+       cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+                     I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+       cmd->len = CPU_TO_LE16(tlv_len);
+
+       status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+       if (!status) {
+               if (mib_len != NULL)
+                       *mib_len = LE16_TO_CPU(desc.datalen);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_update_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to update
+ * @buff_size: size of the buffer holding original and updated TLVs
+ * @old_len: Length of the Original TLV
+ * @new_len: Length of the Updated TLV
+ * @offset: offset of the updated TLV in the buff
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the specified TLV to the LLDP Local MIB for the given bridge type.
+ * Firmware will place the complete LLDP MIB in response buffer with the
+ * updated TLV.
+ **/
+i40e_status i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+                               u8 bridge_type, void *buff, u16 buff_size,
+                               u16 old_len, u16 new_len, u16 offset,
+                               u16 *mib_len,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_lldp_update_tlv *cmd =
+               (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
+       i40e_status status;
+
+       if (buff_size == 0 || !buff || offset == 0 ||
+           old_len == 0 || new_len == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
+
+       /* Indirect Command */
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (buff_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+       desc.datalen = CPU_TO_LE16(buff_size);
+
+       cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+                     I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+       cmd->old_len = CPU_TO_LE16(old_len);
+       cmd->new_offset = CPU_TO_LE16(offset);
+       cmd->new_len = CPU_TO_LE16(new_len);
+
+       status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+       if (!status) {
+               if (mib_len != NULL)
+                       *mib_len = LE16_TO_CPU(desc.datalen);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_delete_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: pointer to a user supplied buffer that has the TLV
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be deleted
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Delete the specified TLV from LLDP Local MIB for the given bridge type.
+ * The firmware places the entire LLDP MIB in the response buffer.
+ **/
+i40e_status i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+                               u8 bridge_type, void *buff, u16 buff_size,
+                               u16 tlv_len, u16 *mib_len,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_lldp_add_tlv *cmd =
+               (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+       i40e_status status;
+
+       if (buff_size == 0 || !buff)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
+
+       /* Indirect Command */
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (buff_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+       desc.datalen = CPU_TO_LE16(buff_size);
+       cmd->len = CPU_TO_LE16(tlv_len);
+       cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+                     I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+       status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+       if (!status) {
+               if (mib_len != NULL)
+                       *mib_len = LE16_TO_CPU(desc.datalen);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_lldp_stop *cmd =
+               (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+       if (shutdown_agent)
+               cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_lldp_start *cmd =
+               (struct i40e_aqc_lldp_start *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+       cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
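
A minimal, hypothetical sketch of restarting the embedded agent with these two calls:

static i40e_status example_restart_lldp(struct i40e_hw *hw)
{
        i40e_status status;

        /* false: stop the agent without a full shutdown */
        status = i40e_aq_stop_lldp(hw, false, NULL);
        if (status != I40E_SUCCESS)
                return status;

        return i40e_aq_start_lldp(hw, NULL);
}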
+
+/**
+ * i40e_aq_get_cee_dcb_config
+ * @hw: pointer to the hw struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @buff_size: size of the buffer passed
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware
+ **/
+i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+                               void *buff, u16 buff_size,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+
+       if (buff_size == 0 || !buff)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
+
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
+                                      cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * @hw: pointer to the hw struct
+ * @start_agent: True if DCBx Agent needs to be Started
+ *                             False if DCBx Agent needs to be Stopped
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded dcbx Agent
+ **/
+i40e_status i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+                               bool start_agent,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_lldp_stop_start_specific_agent *cmd =
+               (struct i40e_aqc_lldp_stop_start_specific_agent *)
+                               &desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                               i40e_aqc_opc_lldp_stop_start_spec_agent);
+
+       if (start_agent)
+               cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+                               u16 udp_port, u8 protocol_index,
+                               u8 *filter_index,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_udp_tunnel *cmd =
+               (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+       struct i40e_aqc_del_udp_tunnel_completion *resp =
+               (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+       cmd->udp_port = CPU_TO_LE16(udp_port);
+       cmd->protocol_type = protocol_index;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status && filter_index)
+               *filter_index = resp->index;
+
+       return status;
+}
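
A minimal sketch adding the IANA VXLAN port; the helper name is hypothetical and I40E_AQC_TUNNEL_TYPE_VXLAN is assumed to be the protocol index defined in i40e_adminq_cmd.h:

static i40e_status example_add_vxlan_port(struct i40e_hw *hw, u8 *filter_idx)
{
        /* 4789 is the IANA-assigned VXLAN UDP port; filter_idx is what
         * i40e_aq_del_udp_tunnel later takes to remove the filter
         */
        return i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
                                      filter_idx, NULL);
}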
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_remove_udp_tunnel *cmd =
+               (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+       cmd->index = index;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_get_switch_resource_alloc (0x0204)
+ * @hw: pointer to the hw struct
+ * @num_entries: pointer to u8 to store the number of resource entries returned
+ * @buf: pointer to a user supplied buffer.  This buffer must be large enough
+ *        to store the resource information for all resource types.  Each
+ *        resource type is an i40e_aqc_switch_resource_alloc_data structure.
+ * @count: number of resource entries the buffer can hold; the length sent to
+ *        firmware is count * sizeof(*buf) bytes
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Query the resources allocated to a function.
+ **/
+i40e_status i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+                       u8 *num_entries,
+                       struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+                       u16 count,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
+               (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
+       i40e_status status;
+       u16 length = count * sizeof(*buf);
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                       i40e_aqc_opc_get_switch_resource_alloc);
+
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       if (length > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+       if (!status && num_entries)
+               *num_entries = cmd_resp->num_entries;
+
+       return status;
+}
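
A minimal, hypothetical caller sketch sizing the buffer in whole entries, matching the length computation above:

static i40e_status example_query_switch_alloc(struct i40e_hw *hw)
{
        struct i40e_aqc_switch_resource_alloc_element_resp buf[32];
        u8 num_entries = 0;
        i40e_status status;

        status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, buf,
                                                   ARRAY_SIZE(buf), NULL);
        /* on success, buf[0..num_entries-1] hold the per-type records */
        return status;
}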
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_switch_seid *cmd =
+               (struct i40e_aqc_switch_seid *)&desc.params.raw;
+       i40e_status status;
+
+       if (seid == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+       cmd->seid = CPU_TO_LE16(seid);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * @hw: pointer to the hw struct
+ * @flags: component flags
+ * @mac_seid: uplink seid (MAC SEID)
+ * @vsi_seid: connected vsi seid
+ * @ret_seid: seid of the created pv component
+ *
+ * This instantiates an i40e port virtualizer with the specified flags.
+ * Depending on the specified flags the port virtualizer can act as a
+ * 802.1Qbr port virtualizer or a 802.1Qbg S-component.
+ **/
+i40e_status i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+                                      u16 mac_seid, u16 vsi_seid,
+                                      u16 *ret_seid)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_update_pv *cmd =
+               (struct i40e_aqc_add_update_pv *)&desc.params.raw;
+       struct i40e_aqc_add_update_pv_completion *resp =
+               (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw;
+       i40e_status status;
+
+       if (vsi_seid == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv);
+       cmd->command_flags = CPU_TO_LE16(flags);
+       cmd->uplink_seid = CPU_TO_LE16(mac_seid);
+       cmd->connected_seid = CPU_TO_LE16(vsi_seid);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+       if (!status && ret_seid)
+               *ret_seid = LE16_TO_CPU(resp->pv_seid);
+
+       return status;
+}
+
+/**
+ * i40e_aq_add_tag - Add an S/E-tag
+ * @hw: pointer to the hw struct
+ * @direct_to_queue: should s-tag direct flow to a specific queue
+ * @vsi_seid: VSI SEID to use this tag
+ * @tag: value of the tag
+ * @queue_num: queue number, only valid if direct_to_queue is true
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates an S- or E-tag to a VSI in the switch complex.  It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+i40e_status i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+                               u16 vsi_seid, u16 tag, u16 queue_num,
+                               u16 *tags_used, u16 *tags_free,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_tag *cmd =
+               (struct i40e_aqc_add_tag *)&desc.params.raw;
+       struct i40e_aqc_add_remove_tag_completion *resp =
+               (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+       i40e_status status;
+
+       if (vsi_seid == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag);
+
+       cmd->seid = CPU_TO_LE16(vsi_seid);
+       cmd->tag = CPU_TO_LE16(tag);
+       if (direct_to_queue) {
+               cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE);
+               cmd->queue_number = CPU_TO_LE16(queue_num);
+       }
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status) {
+               if (tags_used != NULL)
+                       *tags_used = LE16_TO_CPU(resp->tags_used);
+               if (tags_free != NULL)
+                       *tags_free = LE16_TO_CPU(resp->tags_free);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_remove_tag - Remove an S- or E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID this tag is associated with
+ * @tag: value of the S-tag to delete
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an S- or E-tag from a VSI in the switch complex.  It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+i40e_status i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+                               u16 tag, u16 *tags_used, u16 *tags_free,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_remove_tag *cmd =
+               (struct i40e_aqc_remove_tag *)&desc.params.raw;
+       struct i40e_aqc_add_remove_tag_completion *resp =
+               (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+       i40e_status status;
+
+       if (vsi_seid == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag);
+
+       cmd->seid = CPU_TO_LE16(vsi_seid);
+       cmd->tag = CPU_TO_LE16(tag);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status) {
+               if (tags_used != NULL)
+                       *tags_used = LE16_TO_CPU(resp->tags_used);
+               if (tags_free != NULL)
+                       *tags_free = LE16_TO_CPU(resp->tags_free);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_add_mcast_etag - Add a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer of this SEID to associate E-tag with
+ * @etag: value of E-tag to add
+ * @num_tags_in_buf: number of unicast E-tags in indirect buffer
+ * @buf: address of indirect buffer
+ * @tags_used: return value, number of E-tags in use by this port
+ * @tags_free: return value, number of unallocated M-tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates a multicast E-tag to a port virtualizer.  It will return
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ *
+ * The indirect buffer pointed to by buf is a list of 2-byte E-tags,
+ * num_tags_in_buf long.
+ **/
+i40e_status i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+                               u16 etag, u8 num_tags_in_buf, void *buf,
+                               u16 *tags_used, u16 *tags_free,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_remove_mcast_etag *cmd =
+               (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+       struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+          (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+       i40e_status status;
+       u16 length = sizeof(u16) * num_tags_in_buf;
+
+       if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0))
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_add_multicast_etag);
+
+       cmd->pv_seid = CPU_TO_LE16(pv_seid);
+       cmd->etag = CPU_TO_LE16(etag);
+       cmd->num_unicast_etags = num_tags_in_buf;
+
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (length > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+       if (!status) {
+               if (tags_used != NULL)
+                       *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+               if (tags_free != NULL)
+                       *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_remove_mcast_etag - Remove a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer SEID this M-tag is associated with
+ * @etag: value of the E-tag to remove
+ * @tags_used: return value, number of tags in use by this port
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an E-tag from the port virtualizer.  It will return
+ * the number of tags allocated by the port, and the number of unallocated
+ * tags available.
+ **/
+i40e_status i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+                               u16 etag, u16 *tags_used, u16 *tags_free,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_remove_mcast_etag *cmd =
+               (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+       struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+          (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+       i40e_status status;
+
+       if (pv_seid == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_remove_multicast_etag);
+
+       cmd->pv_seid = CPU_TO_LE16(pv_seid);
+       cmd->etag = CPU_TO_LE16(etag);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status) {
+               if (tags_used != NULL)
+                       *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+               if (tags_free != NULL)
+                       *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_update_tag - Update an S/E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID using this S-tag
+ * @old_tag: old tag value
+ * @new_tag: new tag value
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This updates the value of the tag currently attached to this VSI
+ * in the switch complex.  It will return the number of tags allocated
+ * by the PF, and the number of unallocated tags available.
+ **/
+i40e_status i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+                               u16 old_tag, u16 new_tag, u16 *tags_used,
+                               u16 *tags_free,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_update_tag *cmd =
+               (struct i40e_aqc_update_tag *)&desc.params.raw;
+       struct i40e_aqc_update_tag_completion *resp =
+               (struct i40e_aqc_update_tag_completion *)&desc.params.raw;
+       i40e_status status;
+
+       if (vsi_seid == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag);
+
+       cmd->seid = CPU_TO_LE16(vsi_seid);
+       cmd->old_tag = CPU_TO_LE16(old_tag);
+       cmd->new_tag = CPU_TO_LE16(new_tag);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status) {
+               if (tags_used != NULL)
+                       *tags_used = LE16_TO_CPU(resp->tags_used);
+               if (tags_free != NULL)
+                       *tags_free = LE16_TO_CPU(resp->tags_free);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs
+ * @hw: pointer to the hw struct
+ * @tcmap: TC map for request/release any ignore PFC condition
+ * @request: request or release ignore PFC condition
+ * @tcmap_ret: return TCs for which PFC is currently ignored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This sends out request/release to ignore PFC condition for a TC.
+ * It will return the TCs for which PFC is currently ignored.
+ **/
+i40e_status i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap,
+                               bool request, u8 *tcmap_ret,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_pfc_ignore *cmd_resp =
+               (struct i40e_aqc_pfc_ignore *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc);
+
+       if (request)
+               cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET;
+
+       cmd_resp->tc_bitmap = tcmap;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status) {
+               if (tcmap_ret != NULL)
+                       *tcmap_ret = cmd_resp->tc_bitmap;
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * When LLDP is handled in PF this command is used by the PF
+ * to notify EMP that a DCB setting is modified.
+ * When LLDP is handled in EMP this command is used by the PF
+ * to notify EMP whenever one of the following parameters get
+ * modified:
+ *   - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
+ *   - PCIRTT in PRTDCB_GENC.PCIRTT
+ *   - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * allocate a set of smonVlanStats counters to a specific VLAN in a specific
+ * switch.
+ **/
+i40e_status i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+                               u16 vlan_id, u16 *stat_index,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_remove_statistics *cmd_resp =
+               (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+       i40e_status status;
+
+       if ((seid == 0) || (stat_index == NULL))
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics);
+
+       cmd_resp->seid = CPU_TO_LE16(seid);
+       cmd_resp->vlan = CPU_TO_LE16(vlan_id);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status && stat_index)
+               *stat_index = LE16_TO_CPU(cmd_resp->stat_index);
+
+       return status;
+}
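+
+/* Illustrative caller sketch, not part of this patch: allocate one of the
+ * 128 smonVlanStats blocks for VLAN 100 on a switch element and release it
+ * again. 'seid' is assumed to hold a valid switch SEID owned by the caller.
+ *
+ *	u16 stat_index;
+ *
+ *	if (!i40e_aq_add_statistics(hw, seid, 100, &stat_index, NULL))
+ *		i40e_aq_remove_statistics(hw, seid, 100, stat_index, NULL);
+ */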
+
+/**
+ * i40e_aq_remove_statistics - Remove a statistics block from a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * deallocate a set of smonVlanStats counters from a specific VLAN in a specific
+ * switch.
+ **/
+i40e_status i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+                               u16 vlan_id, u16 stat_index,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_remove_statistics *cmd =
+               (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+       i40e_status status;
+
+       if (seid == 0)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_remove_statistics);
+
+       cmd->seid = CPU_TO_LE16(seid);
+       cmd->vlan  = CPU_TO_LE16(vlan_id);
+       cmd->stat_index = CPU_TO_LE16(stat_index);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_port_parameters - set physical port parameters.
+ * @hw: pointer to the hw struct
+ * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
+ * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
+ * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded
+ * @double_vlan: if set double VLAN is enabled
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_port_parameters(struct i40e_hw *hw,
+                               u16 bad_frame_vsi, bool save_bad_pac,
+                               bool pad_short_pac, bool double_vlan,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aqc_set_port_parameters *cmd;
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       u16 command_flags = 0;
+
+       cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_port_parameters);
+
+       cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
+       if (save_bad_pac)
+               command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS;
+       if (pad_short_pac)
+               command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS;
+       if (double_vlan)
+               command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+       cmd->command_flags = CPU_TO_LE16(command_flags);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
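+
+/* Illustrative caller sketch, not part of this patch: pad short Tx packets
+ * and leave bad-frame forwarding and double VLAN off, so no bad frames VSI
+ * is needed and 0 can be passed for it.
+ *
+ *	i40e_aq_set_port_parameters(hw, 0, false, true, false, NULL);
+ */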
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+                               void *buff, u16 buff_size,
+                                enum i40e_admin_queue_opc opcode,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_tx_sched_ind *cmd =
+               (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+       i40e_status status;
+       bool cmd_param_flag = false;
+
+       switch (opcode) {
+       case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+       case i40e_aqc_opc_configure_vsi_tc_bw:
+       case i40e_aqc_opc_enable_switching_comp_ets:
+       case i40e_aqc_opc_modify_switching_comp_ets:
+       case i40e_aqc_opc_disable_switching_comp_ets:
+       case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+       case i40e_aqc_opc_configure_switching_comp_bw_config:
+               cmd_param_flag = true;
+               break;
+       case i40e_aqc_opc_query_vsi_bw_config:
+       case i40e_aqc_opc_query_vsi_ets_sla_config:
+       case i40e_aqc_opc_query_switching_comp_ets_config:
+       case i40e_aqc_opc_query_port_ets_config:
+       case i40e_aqc_opc_query_switching_comp_bw_config:
+               cmd_param_flag = false;
+               break;
+       default:
+               return I40E_ERR_PARAM;
+       }
+
+       i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+       /* Indirect command */
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       if (cmd_param_flag)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+       if (buff_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       desc.datalen = CPU_TO_LE16(buff_size);
+
+       cmd->vsi_seid = CPU_TO_LE16(seid);
+
+       status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+                               u16 seid, u16 credit, u8 max_credit,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_configure_vsi_bw_limit *cmd =
+               (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_configure_vsi_bw_limit);
+
+       cmd->vsi_seid = CPU_TO_LE16(seid);
+       cmd->credit = CPU_TO_LE16(credit);
+       cmd->max_credit = max_credit;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
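+
+/* Illustrative caller sketch, not part of this patch: cap a VSI at ten
+ * scheduler credits with no extra burst accumulation. The Mbps value of a
+ * credit is a hardware scheduler property and is not assumed here.
+ *
+ *	i40e_aq_config_vsi_bw_limit(hw, vsi_seid, 10, 0, NULL);
+ */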
+
+/**
+ * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: switching component seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_bw: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+                               u16 seid, u16 credit, u8 max_bw,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_configure_switching_comp_bw_limit *cmd =
+         (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                               i40e_aqc_opc_configure_switching_comp_bw_limit);
+
+       cmd->seid = CPU_TO_LE16(seid);
+       cmd->credit = CPU_TO_LE16(credit);
+       cmd->max_bw = max_bw;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+                       u16 seid,
+                       struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+                                   i40e_aqc_opc_configure_vsi_ets_sla_bw_limit,
+                                   cmd_details);
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+                       u16 seid,
+                       struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+                                   i40e_aqc_opc_configure_vsi_tc_bw,
+                                   cmd_details);
+}
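+
+/* Illustrative caller sketch, not part of this patch: split a VSI's
+ * bandwidth 75/25 between TC0 and TC1. The tc_valid_bits/tc_bw_credits
+ * field names are taken from the companion admin queue header and are an
+ * assumption here.
+ *
+ *	struct i40e_aqc_configure_vsi_tc_bw_data bw_data = {0};
+ *
+ *	bw_data.tc_valid_bits = BIT(0) | BIT(1);
+ *	bw_data.tc_bw_credits[0] = 75;
+ *	bw_data.tc_bw_credits[1] = 25;
+ *	i40e_aq_config_vsi_tc_bw(hw, vsi_seid, &bw_data, NULL);
+ */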
+
+/**
+ * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component connected to Physical Port
+ * @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode (enable/modify/disable ETS)
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+               u16 seid,
+               struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+               enum i40e_admin_queue_opc opcode,
+               struct i40e_asq_cmd_details *cmd_details)
+{
+       return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+                                   sizeof(*ets_data), opcode, cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+       u16 seid,
+       struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+       struct i40e_asq_cmd_details *cmd_details)
+{
+       return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+                           i40e_aqc_opc_configure_switching_comp_bw_config,
+                           cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_ets_bw_limit(
+       struct i40e_hw *hw, u16 seid,
+       struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+       struct i40e_asq_cmd_details *cmd_details)
+{
+       return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+                           i40e_aqc_opc_configure_switching_comp_ets_bw_limit,
+                           cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+                       u16 seid,
+                       struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+                                   i40e_aqc_opc_query_vsi_bw_config,
+                                   cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+                       u16 seid,
+                       struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+                                   i40e_aqc_opc_query_vsi_ets_sla_config,
+                                   cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+               u16 seid,
+               struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+               struct i40e_asq_cmd_details *cmd_details)
+{
+       return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+                                  i40e_aqc_opc_query_switching_comp_ets_config,
+                                  cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+                       u16 seid,
+                       struct i40e_aqc_query_port_ets_config_resp *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+                                   i40e_aqc_opc_query_port_ets_config,
+                                   cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+               u16 seid,
+               struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+               struct i40e_asq_cmd_details *cmd_details)
+{
+       return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+                                   i40e_aqc_opc_query_switching_comp_bw_config,
+                                   cmd_details);
+}
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks for the valid filter/context sizes being
+ * passed for FCoE and PE.
+ *
+ * Returns I40E_SUCCESS if the values passed are valid and within
+ * range; otherwise returns an error.
+ **/
+static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
+                               struct i40e_filter_control_settings *settings)
+{
+       u32 fcoe_cntx_size, fcoe_filt_size;
+       u32 pe_cntx_size, pe_filt_size;
+       u32 fcoe_fmax;
+       u32 val;
+
+       /* Validate FCoE settings passed */
+       switch (settings->fcoe_filt_num) {
+       case I40E_HASH_FILTER_SIZE_1K:
+       case I40E_HASH_FILTER_SIZE_2K:
+       case I40E_HASH_FILTER_SIZE_4K:
+       case I40E_HASH_FILTER_SIZE_8K:
+       case I40E_HASH_FILTER_SIZE_16K:
+       case I40E_HASH_FILTER_SIZE_32K:
+               fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+               fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+               break;
+       default:
+               return I40E_ERR_PARAM;
+       }
+
+       switch (settings->fcoe_cntx_num) {
+       case I40E_DMA_CNTX_SIZE_512:
+       case I40E_DMA_CNTX_SIZE_1K:
+       case I40E_DMA_CNTX_SIZE_2K:
+       case I40E_DMA_CNTX_SIZE_4K:
+               fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+               fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+               break;
+       default:
+               return I40E_ERR_PARAM;
+       }
+
+       /* Validate PE settings passed */
+       switch (settings->pe_filt_num) {
+       case I40E_HASH_FILTER_SIZE_1K:
+       case I40E_HASH_FILTER_SIZE_2K:
+       case I40E_HASH_FILTER_SIZE_4K:
+       case I40E_HASH_FILTER_SIZE_8K:
+       case I40E_HASH_FILTER_SIZE_16K:
+       case I40E_HASH_FILTER_SIZE_32K:
+       case I40E_HASH_FILTER_SIZE_64K:
+       case I40E_HASH_FILTER_SIZE_128K:
+       case I40E_HASH_FILTER_SIZE_256K:
+       case I40E_HASH_FILTER_SIZE_512K:
+       case I40E_HASH_FILTER_SIZE_1M:
+               pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+               pe_filt_size <<= (u32)settings->pe_filt_num;
+               break;
+       default:
+               return I40E_ERR_PARAM;
+       }
+
+       switch (settings->pe_cntx_num) {
+       case I40E_DMA_CNTX_SIZE_512:
+       case I40E_DMA_CNTX_SIZE_1K:
+       case I40E_DMA_CNTX_SIZE_2K:
+       case I40E_DMA_CNTX_SIZE_4K:
+       case I40E_DMA_CNTX_SIZE_8K:
+       case I40E_DMA_CNTX_SIZE_16K:
+       case I40E_DMA_CNTX_SIZE_32K:
+       case I40E_DMA_CNTX_SIZE_64K:
+       case I40E_DMA_CNTX_SIZE_128K:
+       case I40E_DMA_CNTX_SIZE_256K:
+               pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+               pe_cntx_size <<= (u32)settings->pe_cntx_num;
+               break;
+       default:
+               return I40E_ERR_PARAM;
+       }
+
+       /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+       val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+       fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+                    >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+       if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+               return I40E_ERR_INVALID_SIZE;
+
+       return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at the driver initialization time.
+ **/
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+                               struct i40e_filter_control_settings *settings)
+{
+       i40e_status ret = I40E_SUCCESS;
+       u32 hash_lut_size = 0;
+       u32 val;
+
+       if (!settings)
+               return I40E_ERR_PARAM;
+
+       /* Validate the input settings */
+       ret = i40e_validate_filter_settings(hw, settings);
+       if (ret)
+               return ret;
+
+       /* Read the PF Queue Filter control register */
+       val = rd32(hw, I40E_PFQF_CTL_0);
+
+       /* Program required PE hash buckets for the PF */
+       val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+       val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+               I40E_PFQF_CTL_0_PEHSIZE_MASK;
+       /* Program required PE contexts for the PF */
+       val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+       val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+               I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+       /* Program required FCoE hash buckets for the PF */
+       val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+       val |= ((u32)settings->fcoe_filt_num <<
+                       I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+               I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+       /* Program required FCoE DDP contexts for the PF */
+       val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+       val |= ((u32)settings->fcoe_cntx_num <<
+                       I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+               I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+       /* Program Hash LUT size for the PF */
+       val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+       if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+               hash_lut_size = 1;
+       val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+               I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+       /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+       if (settings->enable_fdir)
+               val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+       if (settings->enable_ethtype)
+               val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+       if (settings->enable_macvlan)
+               val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+       wr32(hw, I40E_PFQF_CTL_0, val);
+
+       return I40E_SUCCESS;
+}
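+
+/* Illustrative init-time sketch, not part of this patch: program the
+ * smallest FCoE/PE filter and context sizes validated above and enable the
+ * FDIR/ethertype/MACVLAN filters. I40E_HASH_LUT_SIZE_128 is assumed from
+ * the companion type header.
+ *
+ *	struct i40e_filter_control_settings settings = {0};
+ *
+ *	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
+ *	settings.enable_fdir = true;
+ *	settings.enable_ethtype = true;
+ *	settings.enable_macvlan = true;
+ *	if (i40e_set_filter_control(hw, &settings))
+ *		hw_dbg(hw, "filter control programming failed\n");
+ */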
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that need to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: if true, add the control packet filter; otherwise remove it
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command adds or removes a control packet filter for a control VSI.
+ * On success it updates the used and free perfect filter counts in the
+ * stats member.
+ **/
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+                               u8 *mac_addr, u16 ethtype, u16 flags,
+                               u16 vsi_seid, u16 queue, bool is_add,
+                               struct i40e_control_filter_stats *stats,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_remove_control_packet_filter *cmd =
+               (struct i40e_aqc_add_remove_control_packet_filter *)
+               &desc.params.raw;
+       struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+               (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+               &desc.params.raw;
+       i40e_status status;
+
+       if (vsi_seid == 0)
+               return I40E_ERR_PARAM;
+
+       if (is_add) {
+               i40e_fill_default_direct_cmd_desc(&desc,
+                               i40e_aqc_opc_add_control_packet_filter);
+               cmd->queue = CPU_TO_LE16(queue);
+       } else {
+               i40e_fill_default_direct_cmd_desc(&desc,
+                               i40e_aqc_opc_remove_control_packet_filter);
+       }
+
+       if (mac_addr)
+               i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN,
+                           I40E_NONDMA_TO_NONDMA);
+
+       cmd->etype = CPU_TO_LE16(ethtype);
+       cmd->flags = CPU_TO_LE16(flags);
+       cmd->seid = CPU_TO_LE16(vsi_seid);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       if (!status && stats) {
+               stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used);
+               stats->etype_used = LE16_TO_CPU(resp->etype_used);
+               stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free);
+               stats->etype_free = LE16_TO_CPU(resp->etype_free);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid to add the ethertype filter to
+ **/
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+                                                   u16 seid)
+{
+       u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+                  I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+                  I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+       u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+       i40e_status status;
+
+       status = i40e_aq_add_rem_control_packet_filter(hw, 0, ethtype, flag,
+                                                      seid, 0, true, NULL,
+                                                      NULL);
+       if (status)
+               hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
+
+/**
+ * i40e_aq_add_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI.  The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+i40e_status i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+       u16 seid,
+       struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+       u8 filter_count)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_remove_cloud_filters *cmd =
+       (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+       u16 buff_len;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_add_cloud_filters);
+
+       buff_len = filter_count * sizeof(*filters);
+       desc.datalen = CPU_TO_LE16(buff_len);
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       cmd->num_filters = filter_count;
+       cmd->seid = CPU_TO_LE16(seid);
+
+       status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+       return status;
+}
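+
+/* Illustrative caller sketch, not part of this patch: add a single cloud
+ * filter element. The element layout (MAC/VLAN/tenant/queue fields) comes
+ * from the companion admin queue header and must be filled by the caller.
+ *
+ *	struct i40e_aqc_add_remove_cloud_filters_element_data cf = {0};
+ *
+ *	(fill the element's MAC/VLAN/tenant/queue fields here)
+ *	i40e_aq_add_cloud_filters(hw, vsi_seid, &cf, 1);
+ */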
+
+/**
+ * i40e_aq_remove_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI.  The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+i40e_status i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+               u16 seid,
+               struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+               u8 filter_count)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_add_remove_cloud_filters *cmd =
+       (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+       i40e_status status;
+       u16 buff_len;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_remove_cloud_filters);
+
+       buff_len = filter_count * sizeof(*filters);
+       desc.datalen = CPU_TO_LE16(buff_len);
+       desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       cmd->num_filters = filter_count;
+       cmd->seid = CPU_TO_LE16(seid);
+
+       status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+       return status;
+}
+
+/**
+ * i40e_aq_alternate_write
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ **/
+i40e_status i40e_aq_alternate_write(struct i40e_hw *hw,
+                               u32 reg_addr0, u32 reg_val0,
+                               u32 reg_addr1, u32 reg_val1)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_alternate_write *cmd_resp =
+               (struct i40e_aqc_alternate_write *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write);
+       cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+       cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+       cmd_resp->data0 = CPU_TO_LE32(reg_val0);
+       cmd_resp->data1 = CPU_TO_LE32(reg_val1);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+       return status;
+}
+
+/**
+ * i40e_aq_alternate_write_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of a first register to be modified
+ * @dw_count: number of alternate structure fields to write
+ * @buffer: pointer to the command buffer
+ *
+ * Write 'dw_count' dwords from 'buffer' to alternate structure
+ * starting at 'addr'.
+ *
+ **/
+i40e_status i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+                               u32 addr, u32 dw_count, void *buffer)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_alternate_ind_write *cmd_resp =
+               (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+       i40e_status status;
+
+       if (buffer == NULL)
+               return I40E_ERR_PARAM;
+
+       /* Indirect command */
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                        i40e_aqc_opc_alternate_write_indirect);
+
+       desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+       desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+       if (dw_count > (I40E_AQ_LARGE_BUF/4))
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       cmd_resp->address = CPU_TO_LE32(addr);
+       cmd_resp->length = CPU_TO_LE32(dw_count);
+       cmd_resp->addr_high = CPU_TO_LE32(upper_32_bits((u64)buffer));
+       cmd_resp->addr_low = CPU_TO_LE32(lower_32_bits((u64)buffer));
+
+       status = i40e_asq_send_command(hw, &desc, buffer,
+                                      lower_32_bits(4*dw_count), NULL);
+
+       return status;
+}
+
+/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
+ * is not passed then only register at 'reg_addr0' is read.
+ *
+ **/
+i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+                               u32 reg_addr0, u32 *reg_val0,
+                               u32 reg_addr1, u32 *reg_val1)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_alternate_write *cmd_resp =
+               (struct i40e_aqc_alternate_write *)&desc.params.raw;
+       i40e_status status;
+
+       if (reg_val0 == NULL)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+       cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+       cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+       if (status == I40E_SUCCESS) {
+               *reg_val0 = LE32_TO_CPU(cmd_resp->data0);
+
+               if (reg_val1 != NULL)
+                       *reg_val1 = LE32_TO_CPU(cmd_resp->data1);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_aq_alternate_read_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of the alternate structure field
+ * @dw_count: number of alternate structure fields to read
+ * @buffer: pointer to the command buffer
+ *
+ * Read 'dw_count' dwords from alternate structure starting at 'addr' and
+ * place them in 'buffer'. The buffer should be allocated by the caller.
+ *
+ **/
+i40e_status i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+                               u32 addr, u32 dw_count, void *buffer)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_alternate_ind_write *cmd_resp =
+               (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+       i40e_status status;
+
+       if (buffer == NULL)
+               return I40E_ERR_PARAM;
+
+       /* Indirect command */
+       i40e_fill_default_direct_cmd_desc(&desc,
+               i40e_aqc_opc_alternate_read_indirect);
+
+       desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+       desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+       if (dw_count > (I40E_AQ_LARGE_BUF/4))
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       cmd_resp->address = CPU_TO_LE32(addr);
+       cmd_resp->length = CPU_TO_LE32(dw_count);
+       cmd_resp->addr_high = CPU_TO_LE32(upper_32_bits((u64)buffer));
+       cmd_resp->addr_low = CPU_TO_LE32(lower_32_bits((u64)buffer));
+
+       status = i40e_asq_send_command(hw, &desc, buffer,
+                                      lower_32_bits(4*dw_count), NULL);
+
+       return status;
+}
+
+/**
+ *  i40e_aq_alternate_clear
+ *  @hw: pointer to the HW structure.
+ *
+ *  Clear the alternate structures of the port from which the function
+ *  is called.
+ *
+ **/
+i40e_status i40e_aq_alternate_clear(struct i40e_hw *hw)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_alternate_clear_port);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+       return status;
+}
+
+/**
+ *  i40e_aq_alternate_write_done
+ *  @hw: pointer to the HW structure.
+ *  @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ *  @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ *  Indicates to the FW that alternate structures have been changed.
+ *
+ **/
+i40e_status i40e_aq_alternate_write_done(struct i40e_hw *hw,
+               u8 bios_mode, bool *reset_needed)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_alternate_write_done *cmd =
+               (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+       i40e_status status;
+
+       if (reset_needed == NULL)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_alternate_write_done);
+
+       cmd->cmd_flags = CPU_TO_LE16(bios_mode);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+       if (!status && reset_needed)
+               *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
+                                I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
+
+       return status;
+}
+
+/**
+ *  i40e_aq_set_oem_mode
+ *  @hw: pointer to the HW structure.
+ *  @oem_mode: the OEM mode to be used
+ *
+ *  Sets the device to a specific operating mode. Currently the only supported
+ *  mode is no_clp, which causes FW to refrain from using Alternate RAM.
+ *
+ **/
+i40e_status i40e_aq_set_oem_mode(struct i40e_hw *hw,
+               u8 oem_mode)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_alternate_write_done *cmd =
+               (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_alternate_set_mode);
+
+       cmd->cmd_flags = CPU_TO_LE16(oem_mode);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+       return status;
+}
+
+/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+       hw->bus.type = i40e_bus_type_pci_express;
+
+       switch (link_status & I40E_PCI_LINK_WIDTH) {
+       case I40E_PCI_LINK_WIDTH_1:
+               hw->bus.width = i40e_bus_width_pcie_x1;
+               break;
+       case I40E_PCI_LINK_WIDTH_2:
+               hw->bus.width = i40e_bus_width_pcie_x2;
+               break;
+       case I40E_PCI_LINK_WIDTH_4:
+               hw->bus.width = i40e_bus_width_pcie_x4;
+               break;
+       case I40E_PCI_LINK_WIDTH_8:
+               hw->bus.width = i40e_bus_width_pcie_x8;
+               break;
+       default:
+               hw->bus.width = i40e_bus_width_unknown;
+               break;
+       }
+
+       switch (link_status & I40E_PCI_LINK_SPEED) {
+       case I40E_PCI_LINK_SPEED_2500:
+               hw->bus.speed = i40e_bus_speed_2500;
+               break;
+       case I40E_PCI_LINK_SPEED_5000:
+               hw->bus.speed = i40e_bus_speed_5000;
+               break;
+       case I40E_PCI_LINK_SPEED_8000:
+               hw->bus.speed = i40e_bus_speed_8000;
+               break;
+       default:
+               hw->bus.speed = i40e_bus_speed_unknown;
+               break;
+       }
+}
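+
+/* Illustrative probe-time sketch, not part of this patch: read the PCIe
+ * link status word from config space and hand it to the helper. The pdev
+ * pointer is assumed to be this function's PCI device.
+ *
+ *	u16 link_status;
+ *
+ *	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
+ *	i40e_set_pci_config_data(hw, link_status);
+ */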
+
+/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+                               u8 table_id, u32 start_index, u16 buff_size,
+                               void *buff, u16 *ret_buff_size,
+                               u8 *ret_next_table, u32 *ret_next_index,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_debug_dump_internals *cmd =
+               (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+       struct i40e_aqc_debug_dump_internals *resp =
+               (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+       i40e_status status;
+
+       if (buff_size == 0 || !buff)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_debug_dump_internals);
+       /* Indirect Command */
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       if (buff_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       cmd->cluster_id = cluster_id;
+       cmd->table_id = table_id;
+       cmd->idx = CPU_TO_LE32(start_index);
+
+       desc.datalen = CPU_TO_LE16(buff_size);
+
+       status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+       if (!status) {
+               if (ret_buff_size != NULL)
+                       *ret_buff_size = LE16_TO_CPU(desc.datalen);
+               if (ret_next_table != NULL)
+                       *ret_next_table = resp->table_id;
+               if (ret_next_index != NULL)
+                       *ret_next_index = LE32_TO_CPU(resp->idx);
+       }
+
+       return status;
+}
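+
+/* Illustrative caller sketch, not part of this patch: read one chunk of a
+ * debug table and pick up the continuation cookie for the next call. The
+ * cluster/table ids are device specific, so 0/0 is only a placeholder, and
+ * 'buff' is assumed to be a caller-allocated 4 KB buffer.
+ *
+ *	u8 next_table;
+ *	u32 next_index;
+ *	u16 rlen;
+ *
+ *	i40e_aq_debug_dump(hw, 0, 0, 0, 4096, buff, &rlen,
+ *			   &next_table, &next_index, NULL);
+ */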
+
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is true if min_bw is a valid value
+ * @max_valid: pointer for bool that is true if max_bw is a valid value
+ *
+ * Read bw from the alternate ram for the given pf
+ **/
+i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+                                       u32 *max_bw, u32 *min_bw,
+                                       bool *min_valid, bool *max_valid)
+{
+       i40e_status status;
+       u32 max_bw_addr, min_bw_addr;
+
+       /* Calculate the address of the min/max bw registers */
+       max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+                     I40E_ALT_STRUCT_MAX_BW_OFFSET +
+                     (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+       min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+                     I40E_ALT_STRUCT_MIN_BW_OFFSET +
+                     (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+
+       /* Read the bandwidths from alt ram */
+       status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+                                       min_bw_addr, min_bw);
+
+       if (*min_bw & I40E_ALT_BW_VALID_MASK)
+               *min_valid = true;
+       else
+               *min_valid = false;
+
+       if (*max_bw & I40E_ALT_BW_VALID_MASK)
+               *max_valid = true;
+       else
+               *max_valid = false;
+
+       return status;
+}
+
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid pfs and bw limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+                       struct i40e_aqc_configure_partition_bw_data *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       i40e_status status;
+       struct i40e_aq_desc desc;
+       u16 bwd_size = sizeof(*bw_data);
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                               i40e_aqc_opc_configure_partition_bw);
+
+       /* Indirect command */
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+       desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+       if (bwd_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+       desc.datalen = CPU_TO_LE16(bwd_size);
+
+       status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
+
+       return status;
+}
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_configfs.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_configfs.c
new file mode 100644 (file)
index 0000000..fb260b6
--- /dev/null
@@ -0,0 +1,357 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+
+#include <linux/configfs.h>
+#include "i40e.h"
+
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+
+/**
+ * configfs structure for i40e
+ *
+ * This file adds code for configfs support for the i40e driver.  This sets
+ * up a filesystem under /sys/kernel/config in which configuration changes
+ * can be made for the driver's netdevs.
+ *
+ * The initialization in this code creates the "i40e" entry in the configfs
+ * system.  After that, the user needs to use mkdir to create configurations
+ * for specific netdev ports; for example "mkdir eth3".  This code will verify
+ * that such a netdev exists and that it is owned by i40e.
+ *
+ **/
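+
+/* Illustrative usage sketch, not part of this patch, assuming configfs is
+ * mounted, the i40e driver owns eth3, and max_bw permits the value:
+ *
+ *	# mkdir /sys/kernel/config/i40e/eth3
+ *	# echo 50 > /sys/kernel/config/i40e/eth3/min_bw
+ *	# echo 1 > /sys/kernel/config/i40e/eth3/commit
+ */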
+
+struct i40e_cfgfs_vsi {
+       struct config_item item;
+       struct i40e_vsi *vsi;
+};
+
+static inline struct i40e_cfgfs_vsi *to_i40e_cfgfs_vsi(struct config_item *item)
+{
+       return item ? container_of(item, struct i40e_cfgfs_vsi, item) : NULL;
+}
+
+static struct configfs_attribute i40e_cfgfs_vsi_attr_min_bw = {
+       .ca_owner = THIS_MODULE,
+       .ca_name = "min_bw",
+       .ca_mode = S_IRUGO | S_IWUSR,
+};
+
+static struct configfs_attribute i40e_cfgfs_vsi_attr_max_bw = {
+       .ca_owner = THIS_MODULE,
+       .ca_name = "max_bw",
+       .ca_mode = S_IRUGO | S_IWUSR,
+};
+
+static struct configfs_attribute i40e_cfgfs_vsi_attr_commit = {
+       .ca_owner = THIS_MODULE,
+       .ca_name = "commit",
+       .ca_mode = S_IRUGO | S_IWUSR,
+};
+
+static struct configfs_attribute i40e_cfgfs_vsi_attr_port_count = {
+       .ca_owner = THIS_MODULE,
+       .ca_name = "ports",
+       .ca_mode = S_IRUGO | S_IWUSR,
+};
+
+static struct configfs_attribute i40e_cfgfs_vsi_attr_part_count = {
+       .ca_owner = THIS_MODULE,
+       .ca_name = "partitions",
+       .ca_mode = S_IRUGO | S_IWUSR,
+};
+
+static struct configfs_attribute *i40e_cfgfs_vsi_attrs[] = {
+       &i40e_cfgfs_vsi_attr_min_bw,
+       &i40e_cfgfs_vsi_attr_max_bw,
+       &i40e_cfgfs_vsi_attr_commit,
+       &i40e_cfgfs_vsi_attr_port_count,
+       &i40e_cfgfs_vsi_attr_part_count,
+       NULL,
+};
+
+/**
+ * i40e_cfgfs_vsi_attr_show - Show a VSI's NPAR BW partition info
+ * @item: A pointer back to the configfs item created on driver load
+ * @attr: A pointer to this item's configuration attribute
+ * @page: A pointer to the output buffer
+ **/
+static ssize_t i40e_cfgfs_vsi_attr_show(struct config_item *item,
+                                     struct configfs_attribute *attr,
+                                     char *page)
+{
+       struct i40e_cfgfs_vsi *i40e_cfgfs_vsi = to_i40e_cfgfs_vsi(item);
+       struct i40e_pf *pf = i40e_cfgfs_vsi->vsi->back;
+       ssize_t count;
+
+       if (i40e_cfgfs_vsi->vsi != pf->vsi[pf->lan_vsi])
+               return 0;
+
+       if (strncmp(attr->ca_name, "min_bw", 6) == 0)
+               count = sprintf(page, "%s %s %d%%\n",
+                               i40e_cfgfs_vsi->vsi->netdev->name,
+                               (pf->min_bw & I40E_ALT_BW_RELATIVE_MASK) ?
+                               "Relative Min BW" : "Absolute Min BW",
+                               pf->min_bw & I40E_ALT_BW_VALUE_MASK);
+       else if (strncmp(attr->ca_name, "max_bw", 6) == 0)
+               count = sprintf(page, "%s %s %d%%\n",
+                               i40e_cfgfs_vsi->vsi->netdev->name,
+                               (pf->max_bw & I40E_ALT_BW_RELATIVE_MASK) ?
+                               "Relative Max BW" : "Absolute Max BW",
+                               pf->max_bw & I40E_ALT_BW_VALUE_MASK);
+       else if (strncmp(attr->ca_name, "ports", 5) == 0)
+               count = sprintf(page, "%d\n",
+                               pf->hw.num_ports);
+       else if (strncmp(attr->ca_name, "partitions", 10) == 0)
+               count = sprintf(page, "%d\n",
+                               pf->hw.num_partitions);
+       else
+               return 0;
+
+       return count;
+}
+
+/**
+ * i40e_cfgfs_vsi_attr_store - Store a VSI's NPAR BW partition setting
+ * @item: A pointer back to the configfs item created on driver load
+ * @attr: A pointer to this item's configuration attribute
+ * @page: A pointer to the buffer holding the user input values
+ **/
+static ssize_t i40e_cfgfs_vsi_attr_store(struct config_item *item,
+                                      struct configfs_attribute *attr,
+                                      const char *page, size_t count)
+{
+       struct i40e_cfgfs_vsi *i40e_cfgfs_vsi = to_i40e_cfgfs_vsi(item);
+       struct i40e_pf *pf = i40e_cfgfs_vsi->vsi->back;
+       char *p = (char *) page;
+       int rc;
+       unsigned long tmp;
+
+       if (i40e_cfgfs_vsi->vsi != pf->vsi[pf->lan_vsi])
+               return 0;
+
+       if (!p || (*p && (*p == '\n')))
+               return -EINVAL;
+
+       rc = kstrtoul(p, 10, &tmp);
+       if (rc)
+               return rc;
+       if (tmp > 100)
+               return -ERANGE;
+
+       if (strncmp(attr->ca_name, "min_bw", 6) == 0) {
+               if (tmp > (pf->max_bw & I40E_ALT_BW_VALUE_MASK))
+                       return -ERANGE;
+               /* Preserve the valid and relative BW bits - the rest is
+                * don't care.
+                */
+               pf->min_bw &= (I40E_ALT_BW_RELATIVE_MASK |
+                                   I40E_ALT_BW_VALID_MASK);
+               pf->min_bw |= (tmp & I40E_ALT_BW_VALUE_MASK);
+               i40e_set_partition_bw_setting(pf);
+       } else if (strncmp(attr->ca_name, "max_bw", 6) == 0) {
+               if (tmp < 1 ||
+                   tmp < (pf->min_bw & I40E_ALT_BW_VALUE_MASK))
+                       return -ERANGE;
+               /* Preserve the valid and relative BW bits - the rest is
+                * don't care.
+                */
+               pf->max_bw &= (I40E_ALT_BW_RELATIVE_MASK |
+                                   I40E_ALT_BW_VALID_MASK);
+               pf->max_bw |= (tmp & I40E_ALT_BW_VALUE_MASK);
+               i40e_set_partition_bw_setting(pf);
+       } else if (strncmp(attr->ca_name, "commit", 6) == 0 && tmp == 1) {
+               if (i40e_commit_partition_bw_setting(pf))
+                       return -EIO;
+       }
+
+       return count;
+}
+
+/**
+ * i40e_cfgfs_vsi_release - Free up the configuration item memory
+ * @item: A pointer back to the configfs item created on driver load
+ **/
+static void i40e_cfgfs_vsi_release(struct config_item *item)
+{
+       kfree(to_i40e_cfgfs_vsi(item));
+}
+
+static struct configfs_item_operations i40e_cfgfs_vsi_item_ops = {
+       .release                = i40e_cfgfs_vsi_release,
+       .show_attribute         = i40e_cfgfs_vsi_attr_show,
+       .store_attribute        = i40e_cfgfs_vsi_attr_store,
+};
+
+static struct config_item_type i40e_cfgfs_vsi_type = {
+       .ct_item_ops    = &i40e_cfgfs_vsi_item_ops,
+       .ct_attrs       = i40e_cfgfs_vsi_attrs,
+       .ct_owner       = THIS_MODULE,
+};
+
+struct i40e_cfgfs_group {
+       struct config_group group;
+};
+
+/**
+ * to_i40e_cfgfs_group - Get the group pointer from the config item
+ * @item: A pointer back to the configfs item created on driver load
+ **/
+static inline struct i40e_cfgfs_group *
+to_i40e_cfgfs_group(struct config_item *item)
+{
+       return item ? container_of(to_config_group(item),
+                                  struct i40e_cfgfs_group, group) : NULL;
+}
+
+/**
+ * i40e_cfgfs_group_make_item - Create the configfs item with group container
+ * @group: A pointer to our configfs group
+ * @name: A pointer to the name of the device we're looking for
+ **/
+static struct config_item *
+i40e_cfgfs_group_make_item(struct config_group *group, const char *name)
+{
+       struct i40e_cfgfs_vsi *i40e_cfgfs_vsi;
+       struct net_device *netdev;
+       struct i40e_netdev_priv *np;
+
+       read_lock(&dev_base_lock);
+       netdev = first_net_device(&init_net);
+       while (netdev) {
+               if (strncmp(netdev->name, name, sizeof(netdev->name)) == 0)
+                       break;
+               netdev = next_net_device(netdev);
+       }
+       read_unlock(&dev_base_lock);
+
+       if (!netdev)
+               return ERR_PTR(-ENODEV);
+
+       /* is this netdev owned by i40e? */
+       if (netdev->netdev_ops->ndo_open != i40e_open)
+               return ERR_PTR(-EACCES);
+
+       i40e_cfgfs_vsi = kzalloc(sizeof(struct i40e_cfgfs_vsi), GFP_KERNEL);
+       if (!i40e_cfgfs_vsi)
+               return ERR_PTR(-ENOMEM);
+
+       np = netdev_priv(netdev);
+       i40e_cfgfs_vsi->vsi = np->vsi;
+       config_item_init_type_name(&i40e_cfgfs_vsi->item, name,
+                                  &i40e_cfgfs_vsi_type);
+
+       return &i40e_cfgfs_vsi->item;
+}
+
+static struct configfs_attribute i40e_cfgfs_group_attr_description = {
+       .ca_owner = THIS_MODULE,
+       .ca_name = "description",
+       .ca_mode = S_IRUGO,
+};
+
+static struct configfs_attribute *i40e_cfgfs_group_attrs[] = {
+       &i40e_cfgfs_group_attr_description,
+       NULL,
+};
+
+static ssize_t i40e_cfgfs_group_attr_show(struct config_item *item,
+                                        struct configfs_attribute *attr,
+                                        char *page)
+{
+       return sprintf(page,
+"i40e\n"
+"\n"
+"This subsystem allows the modification of network port configurations.\n"
+"To start, use the name of the network port to be configured in a 'mkdir'\n"
+"command, e.g. 'mkdir eth3'.\n");
+}
+
+static void i40e_cfgfs_group_release(struct config_item *item)
+{
+       kfree(to_i40e_cfgfs_group(item));
+}
+
+static struct configfs_item_operations i40e_cfgfs_group_item_ops = {
+       .release        = i40e_cfgfs_group_release,
+       .show_attribute = i40e_cfgfs_group_attr_show,
+};
+
+/*
+ * Note that, since no extra work is required on ->drop_item(),
+ * no ->drop_item() is provided.
+ */
+static struct configfs_group_operations i40e_cfgfs_group_ops = {
+       .make_item      = i40e_cfgfs_group_make_item,
+};
+
+static struct config_item_type i40e_cfgfs_group_type = {
+       .ct_item_ops    = &i40e_cfgfs_group_item_ops,
+       .ct_group_ops   = &i40e_cfgfs_group_ops,
+       .ct_attrs       = i40e_cfgfs_group_attrs,
+       .ct_owner       = THIS_MODULE,
+};
+
+static struct configfs_subsystem i40e_cfgfs_group_subsys = {
+       .su_group = {
+               .cg_item = {
+                       .ci_namebuf = "i40e",
+                       .ci_type = &i40e_cfgfs_group_type,
+               },
+       },
+};
+
+/**
+ * i40e_configfs_init - Initialize configfs support for our driver
+ **/
+int i40e_configfs_init(void)
+{
+       int ret;
+       struct configfs_subsystem *subsys;
+
+       subsys = &i40e_cfgfs_group_subsys;
+
+       config_group_init(&subsys->su_group);
+       mutex_init(&subsys->su_mutex);
+       ret = configfs_register_subsystem(subsys);
+       if (ret) {
+               pr_err("Error %d while registering configfs subsystem %s\n",
+                      ret, subsys->su_group.cg_item.ci_namebuf);
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_configfs_exit - Unregister the configfs subsystem and release its memory
+ **/
+void i40e_configfs_exit(void)
+{
+       configfs_unregister_subsystem(&i40e_cfgfs_group_subsys);
+}
+
+#endif /* CONFIG_CONFIGFS_FS */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb.c
new file mode 100644 (file)
index 0000000..ac0053f
--- /dev/null
@@ -0,0 +1,1289 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+       u32 reg;
+
+       if (!status)
+               return I40E_ERR_PARAM;
+
+       reg = rd32(hw, I40E_PRTDCB_GENS);
+       *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+                       I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+       return I40E_SUCCESS;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+                                      struct i40e_dcbx_config *dcbcfg)
+{
+       struct i40e_dcb_ets_config *etscfg;
+       u8 *buf = tlv->tlvinfo;
+       u16 offset = 0;
+       u8 priority;
+       int i;
+
+       /* First Octet post subtype
+        * --------------------------
+        * |will-|CBS  | Re-  | Max |
+        * |ing  |     |served| TCs |
+        * --------------------------
+        * |1bit | 1bit|3 bits|3bits|
+        */
+       etscfg = &dcbcfg->etscfg;
+       etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+                              I40E_IEEE_ETS_WILLING_SHIFT);
+       etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+                          I40E_IEEE_ETS_CBS_SHIFT);
+       etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+                             I40E_IEEE_ETS_MAXTC_SHIFT);
+
+       /* Move offset to Priority Assignment Table */
+       offset++;
+
+       /* Priority Assignment Table (4 octets)
+        * Octets:|    1    |    2    |    3    |    4    |
+        *        -----------------------------------------
+        *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+        *        -----------------------------------------
+        *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+        *        -----------------------------------------
+        */
+       for (i = 0; i < 4; i++) {
+               priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+                               I40E_IEEE_ETS_PRIO_1_SHIFT);
+               etscfg->prioritytable[i * 2] =  priority;
+               priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+                               I40E_IEEE_ETS_PRIO_0_SHIFT);
+               etscfg->prioritytable[i * 2 + 1] = priority;
+               offset++;
+       }
+
+       /* TC Bandwidth Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               etscfg->tcbwtable[i] = buf[offset++];
+
+       /* TSA Assignment Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               etscfg->tsatable[i] = buf[offset++];
+}
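The nibble packing above is easy to get backwards: the even-indexed priority sits in the high nibble (PRIO_1) and the odd-indexed one in the low nibble (PRIO_0). A self-contained sketch of the same mask/shift arithmetic, with constants copied from the I40E_IEEE_ETS_PRIO_* defines and a made-up sample octet:

    #include <stdio.h>

    #define ETS_PRIO_1_SHIFT 4      /* high nibble: even priority index */
    #define ETS_PRIO_1_MASK  (0x7 << ETS_PRIO_1_SHIFT)
    #define ETS_PRIO_0_SHIFT 0      /* low nibble: odd priority index */
    #define ETS_PRIO_0_MASK  (0x7 << ETS_PRIO_0_SHIFT)

    int main(void)
    {
            unsigned char octet = 0x21;     /* sample table octet */
            unsigned int even = (octet & ETS_PRIO_1_MASK) >> ETS_PRIO_1_SHIFT;
            unsigned int odd  = (octet & ETS_PRIO_0_MASK) >> ETS_PRIO_0_SHIFT;

            printf("prio[2i]=%u prio[2i+1]=%u\n", even, odd);  /* 2 and 1 */
            return 0;
    }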
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+                                      struct i40e_dcbx_config *dcbcfg)
+{
+       u8 *buf = tlv->tlvinfo;
+       u16 offset = 0;
+       u8 priority;
+       int i;
+
+       /* Move offset to priority table */
+       offset++;
+
+       /* Priority Assignment Table (4 octets)
+        * Octets:|    1    |    2    |    3    |    4    |
+        *        -----------------------------------------
+        *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+        *        -----------------------------------------
+        *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+        *        -----------------------------------------
+        */
+       for (i = 0; i < 4; i++) {
+               priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+                               I40E_IEEE_ETS_PRIO_1_SHIFT);
+               dcbcfg->etsrec.prioritytable[i*2] =  priority;
+               priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+                               I40E_IEEE_ETS_PRIO_0_SHIFT);
+               dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+               offset++;
+       }
+
+       /* TC Bandwidth Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+       /* TSA Assignment Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+                                      struct i40e_dcbx_config *dcbcfg)
+{
+       u8 *buf = tlv->tlvinfo;
+
+       /* ----------------------------------------
+        * |will-|MBC  | Re-  | PFC |  PFC Enable  |
+        * |ing  |     |served| cap |              |
+        * -----------------------------------------
+        * |1bit | 1bit|2 bits|4bits| 1 octet      |
+        */
+       dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+                                  I40E_IEEE_PFC_WILLING_SHIFT);
+       dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+                              I40E_IEEE_PFC_MBC_SHIFT);
+       dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+                                 I40E_IEEE_PFC_CAP_SHIFT);
+       dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+                                   struct i40e_dcbx_config *dcbcfg)
+{
+       u16 typelength;
+       u16 offset = 0;
+       u16 length;
+       int i = 0;
+       u8 *buf;
+
+       typelength = ntohs(tlv->typelength);
+       length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+                      I40E_LLDP_TLV_LEN_SHIFT);
+       buf = tlv->tlvinfo;
+
+       /* The App priority table starts 5 octets after TLV header */
+       length -= (sizeof(tlv->ouisubtype) + 1);
+
+       /* Move offset to App Priority Table */
+       offset++;
+
+       /* Application Priority Table (3 octets)
+        * Octets:|         1          |    2    |    3    |
+        *        -----------------------------------------
+        *        |Priority|Rsrvd| Sel |    Protocol ID    |
+        *        -----------------------------------------
+        *   Bits:|23    21|20 19|18 16|15                0|
+        *        -----------------------------------------
+        */
+       while (offset < length) {
+               dcbcfg->app[i].priority = (u8)((buf[offset] &
+                                               I40E_IEEE_APP_PRIO_MASK) >>
+                                              I40E_IEEE_APP_PRIO_SHIFT);
+               dcbcfg->app[i].selector = (u8)((buf[offset] &
+                                               I40E_IEEE_APP_SEL_MASK) >>
+                                              I40E_IEEE_APP_SEL_SHIFT);
+               dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+                                            buf[offset + 2];
+               /* Move to next app */
+               offset += 3;
+               i++;
+               if (i >= I40E_DCBX_MAX_APPS)
+                       break;
+       }
+
+       dcbcfg->numapps = i;
+}
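Each application entry packs a 3-bit priority in bits 23-21 and a 3-bit selector in bits 18-16 of its first octet, followed by a big-endian 16-bit protocol ID. A standalone decode of one sample entry (the values are invented; 0x8906 happens to be the FCoE ethertype and is used purely for illustration):

    #include <stdio.h>

    int main(void)
    {
            /* Sample 3-octet APP entry: priority 4, selector 2,
             * protocol ID 0x8906.
             */
            unsigned char entry[3] = { (4u << 5) | 2u, 0x89, 0x06 };
            unsigned int prio = (entry[0] >> 5) & 0x7;
            unsigned int sel  = entry[0] & 0x7;
            unsigned int pid  = (entry[1] << 8) | entry[2];

            printf("prio=%u sel=%u protocolid=0x%04x\n", prio, sel, pid);
            return 0;
    }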
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+                               struct i40e_dcbx_config *dcbcfg)
+{
+       u32 ouisubtype;
+       u8 subtype;
+
+       ouisubtype = ntohl(tlv->ouisubtype);
+       subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+                      I40E_LLDP_TLV_SUBTYPE_SHIFT);
+       switch (subtype) {
+       case I40E_IEEE_SUBTYPE_ETS_CFG:
+               i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+               break;
+       case I40E_IEEE_SUBTYPE_ETS_REC:
+               i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+               break;
+       case I40E_IEEE_SUBTYPE_PFC_CFG:
+               i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+               break;
+       case I40E_IEEE_SUBTYPE_APP_PRI:
+               i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+                                    struct i40e_dcbx_config *dcbcfg)
+{
+       struct i40e_dcb_ets_config *etscfg;
+       u8 *buf = tlv->tlvinfo;
+       u16 offset = 0;
+       u8 priority;
+       int i;
+
+       etscfg = &dcbcfg->etscfg;
+
+       if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+               etscfg->willing = 1;
+
+       etscfg->cbs = 0;
+       /* Priority Group Table (4 octets)
+        * Octets:|    1    |    2    |    3    |    4    |
+        *        -----------------------------------------
+        *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+        *        -----------------------------------------
+        *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+        *        -----------------------------------------
+        */
+       for (i = 0; i < 4; i++) {
+               priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+                                I40E_CEE_PGID_PRIO_1_SHIFT);
+               etscfg->prioritytable[i * 2] =  priority;
+               priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+                                I40E_CEE_PGID_PRIO_0_SHIFT);
+               etscfg->prioritytable[i * 2 + 1] = priority;
+               offset++;
+       }
+
+       /* PG Percentage Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               etscfg->tcbwtable[i] = buf[offset++];
+
+       /* Number of TCs supported (1 octet) */
+       etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+                                     struct i40e_dcbx_config *dcbcfg)
+{
+       u8 *buf = tlv->tlvinfo;
+
+       if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+               dcbcfg->pfc.willing = 1;
+
+       /* ------------------------
+        * | PFC Enable | PFC TCs |
+        * ------------------------
+        * | 1 octet    | 1 octet |
+        */
+       dcbcfg->pfc.pfcenable = buf[0];
+       dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+                                  struct i40e_dcbx_config *dcbcfg)
+{
+       u16 length, typelength, offset = 0;
+       struct i40e_cee_app_prio *app;
+       u8 i, up, selector;
+
+       typelength = ntohs(tlv->hdr.typelen);
+       length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+                      I40E_LLDP_TLV_LEN_SHIFT);
+
+       dcbcfg->numapps = length/sizeof(*app);
+       if (!dcbcfg->numapps)
+               return;
+
+       for (i = 0; i < dcbcfg->numapps; i++) {
+               app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+               for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+                       if (app->prio_map & BIT(up))
+                               break;
+               }
+               dcbcfg->app[i].priority = up;
+               /* Get Selector from lower 2 bits, and convert to IEEE */
+               selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+               if (selector == I40E_CEE_APP_SEL_ETHTYPE)
+                       dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+               else if (selector == I40E_CEE_APP_SEL_TCPIP)
+                       dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+               else
+                       /* Keep selector as it is for unknown types */
+                       dcbcfg->app[i].selector = selector;
+               dcbcfg->app[i].protocolid = ntohs(app->protocol);
+               /* Move to next app */
+               offset += sizeof(*app);
+       }
+}
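The CEE APP entry carries a per-user-priority bitmap (prio_map), and the loop above reduces it to a single IEEE priority by taking the lowest set bit. A minimal sketch with an assumed bitmap value:

    #include <stdio.h>

    int main(void)
    {
            unsigned char prio_map = 0x18;  /* sample: bits 3 and 4 set */
            unsigned int up;

            /* Same scan as the driver: the lowest set bit wins */
            for (up = 0; up < 8; up++)
                    if (prio_map & (1u << up))
                            break;
            printf("priority=%u\n", up);    /* prints 3 */
            return 0;
    }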
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+                              struct i40e_dcbx_config *dcbcfg)
+{
+       u16 len, tlvlen, sublen, typelength;
+       struct i40e_cee_feat_tlv *sub_tlv;
+       u8 subtype, feat_tlv_count = 0;
+       u32 ouisubtype;
+
+       ouisubtype = ntohl(tlv->ouisubtype);
+       subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+                      I40E_LLDP_TLV_SUBTYPE_SHIFT);
+       /* Return if not CEE DCBX */
+       if (subtype != I40E_CEE_DCBX_TYPE)
+               return;
+
+       typelength = ntohs(tlv->typelength);
+       tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+                       I40E_LLDP_TLV_LEN_SHIFT);
+       len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+             sizeof(struct i40e_cee_ctrl_tlv);
+       /* Return if no CEE DCBX Feature TLVs */
+       if (tlvlen <= len)
+               return;
+
+       sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+       while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+               typelength = ntohs(sub_tlv->hdr.typelen);
+               sublen = (u16)((typelength &
+                               I40E_LLDP_TLV_LEN_MASK) >>
+                               I40E_LLDP_TLV_LEN_SHIFT);
+               subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+                               I40E_LLDP_TLV_TYPE_SHIFT);
+               switch (subtype) {
+               case I40E_CEE_SUBTYPE_PG_CFG:
+                       i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+                       break;
+               case I40E_CEE_SUBTYPE_PFC_CFG:
+                       i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+                       break;
+               case I40E_CEE_SUBTYPE_APP_PRI:
+                       i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+                       break;
+               default:
+                       return; /* Invalid sub-type, stop parsing */
+               }
+               feat_tlv_count++;
+               /* Move to next sub TLV */
+               sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+                                               sizeof(sub_tlv->hdr.typelen) +
+                                               sublen);
+       }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Currently IEEE 802.1Qaz and CEE DCBX TLVs are supported; all
+ * other organization specific TLVs are ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+                              struct i40e_dcbx_config *dcbcfg)
+{
+       u32 ouisubtype;
+       u32 oui;
+
+       ouisubtype = ntohl(tlv->ouisubtype);
+       oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+                   I40E_LLDP_TLV_OUI_SHIFT);
+       switch (oui) {
+       case I40E_IEEE_8021QAZ_OUI:
+               i40e_parse_ieee_tlv(tlv, dcbcfg);
+               break;
+       case I40E_CEE_DCBX_OUI:
+               i40e_parse_cee_tlv(tlv, dcbcfg);
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+                                   struct i40e_dcbx_config *dcbcfg)
+{
+       i40e_status ret = I40E_SUCCESS;
+       struct i40e_lldp_org_tlv *tlv;
+       u16 type;
+       u16 length;
+       u16 typelength;
+       u16 offset = 0;
+
+       if (!lldpmib || !dcbcfg)
+               return I40E_ERR_PARAM;
+
+       /* set to the start of LLDPDU */
+       lldpmib += ETH_HLEN;
+       tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+       while (1) {
+               typelength = ntohs(tlv->typelength);
+               type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+                            I40E_LLDP_TLV_TYPE_SHIFT);
+               length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+                              I40E_LLDP_TLV_LEN_SHIFT);
+               offset += sizeof(typelength) + length;
+
+               /* END TLV or beyond LLDPDU size */
+               if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+                       break;
+
+               switch (type) {
+               case I40E_TLV_TYPE_ORG:
+                       i40e_parse_org_tlv(tlv, dcbcfg);
+                       break;
+               default:
+                       break;
+               }
+
+               /* Move to next TLV */
+               tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+                                                   sizeof(tlv->typelength) +
+                                                   length);
+       }
+
+       return ret;
+}
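Every TLV in the LLDPDU begins with a 16-bit big-endian header: a 7-bit type in the upper bits and a 9-bit length in the lower bits, which is exactly what the mask/shift pairs above extract after ntohs(). A host-order sketch with sample values, mirroring the I40E_LLDP_TLV_TYPE/LEN defines:

    #include <stdio.h>

    #define TLV_LEN_MASK   0x01FF           /* low 9 bits */
    #define TLV_TYPE_SHIFT 9
    #define TLV_TYPE_MASK  (0x7F << TLV_TYPE_SHIFT)

    int main(void)
    {
            /* Sample header: type 127 (org specific), length 25 */
            unsigned short typelength = (127u << TLV_TYPE_SHIFT) | 25u;
            unsigned int type = (typelength & TLV_TYPE_MASK) >> TLV_TYPE_SHIFT;
            unsigned int len  = typelength & TLV_LEN_MASK;

            printf("typelength=0x%04x type=%u length=%u\n",
                   typelength, type, len);  /* 0xfe19, 127, 25 */
            return 0;
    }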
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+                                  u8 bridgetype,
+                                  struct i40e_dcbx_config *dcbcfg)
+{
+       i40e_status ret = I40E_SUCCESS;
+       struct i40e_virt_mem mem;
+       u8 *lldpmib;
+
+       /* Allocate the LLDPDU */
+       ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+       if (ret)
+               return ret;
+
+       lldpmib = (u8 *)mem.va;
+       ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+                                  (void *)lldpmib, I40E_LLDPDU_SIZE,
+                                  NULL, NULL, NULL);
+       if (ret)
+               goto free_mem;
+
+       /* Parse LLDP MIB to get dcb configuration */
+       ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+       i40e_free_virt_mem(hw, &mem);
+       return ret;
+}
+
+/**
+ * i40e_cee_to_dcb_v1_config
+ * @cee_cfg: pointer to CEE v1 response configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE v1 configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_v1_config(
+                       struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg,
+                       struct i40e_dcbx_config *dcbcfg)
+{
+       u16 status, tlv_status = LE16_TO_CPU(cee_cfg->tlv_status);
+       u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+       u8 i, tc, err;
+
+       /* CEE PG data to ETS config */
+       dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+       /* Note that the FW creates the oper_prio_tc nibbles reversed
+        * from those in the CEE Priority Group sub-TLV.
+        */
+       for (i = 0; i < 4; i++) {
+               tc = (u8)((cee_cfg->oper_prio_tc[i] &
+                        I40E_CEE_PGID_PRIO_0_MASK) >>
+                        I40E_CEE_PGID_PRIO_0_SHIFT);
+               dcbcfg->etscfg.prioritytable[i*2] =  tc;
+               tc = (u8)((cee_cfg->oper_prio_tc[i] &
+                        I40E_CEE_PGID_PRIO_1_MASK) >>
+                        I40E_CEE_PGID_PRIO_1_SHIFT);
+               dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+       }
+
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+                       /* Map it to next empty TC */
+                       dcbcfg->etscfg.prioritytable[i] =
+                                               cee_cfg->oper_num_tc - 1;
+                       dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+               } else {
+                       dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+               }
+       }
+
+       /* CEE PFC data to ETS config */
+       dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+       dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+       status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
+                 I40E_AQC_CEE_APP_STATUS_SHIFT;
+       err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+       /* Add APPs if Error is False */
+       if (!err) {
+               /* CEE operating configuration supports FCoE/iSCSI/FIP only */
+               dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
+
+               /* FCoE APP */
+               dcbcfg->app[0].priority =
+                       (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+                        I40E_AQC_CEE_APP_FCOE_SHIFT;
+               dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+               dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+               /* iSCSI APP */
+               dcbcfg->app[1].priority =
+                       (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+                        I40E_AQC_CEE_APP_ISCSI_SHIFT;
+               dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
+               dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+
+               /* FIP APP */
+               dcbcfg->app[2].priority =
+                       (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+                        I40E_AQC_CEE_APP_FIP_SHIFT;
+               dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
+               dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+       }
+}
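As the comment in the function notes, firmware returns the oper_prio_tc nibbles reversed relative to the on-the-wire CEE Priority Group sub-TLV, so here PRIO_0 (the low nibble) maps the even priority. A standalone sketch with a made-up firmware octet:

    #include <stdio.h>

    int main(void)
    {
            /* Sample FW octet: low nibble = TC for prio 2i,
             * high nibble = TC for prio 2i+1.
             */
            unsigned char oper_prio_tc = 0x31;
            unsigned int tc_even = oper_prio_tc & 0x0F;         /* 1 */
            unsigned int tc_odd  = (oper_prio_tc >> 4) & 0x0F;  /* 3 */

            printf("prio[2i]->TC%u prio[2i+1]->TC%u\n", tc_even, tc_odd);
            return 0;
    }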
+
+/**
+ * i40e_cee_to_dcb_config
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ **/
+static void i40e_cee_to_dcb_config(
+                               struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+                               struct i40e_dcbx_config *dcbcfg)
+{
+       u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
+       u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+       u8 i, tc, err, sync, oper;
+
+       /* CEE PG data to ETS config */
+       dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+       /* Note that the FW creates the oper_prio_tc nibbles reversed
+        * from those in the CEE Priority Group sub-TLV.
+        */
+       for (i = 0; i < 4; i++) {
+               tc = (u8)((cee_cfg->oper_prio_tc[i] &
+                        I40E_CEE_PGID_PRIO_0_MASK) >>
+                        I40E_CEE_PGID_PRIO_0_SHIFT);
+               dcbcfg->etscfg.prioritytable[i*2] =  tc;
+               tc = (u8)((cee_cfg->oper_prio_tc[i] &
+                        I40E_CEE_PGID_PRIO_1_MASK) >>
+                        I40E_CEE_PGID_PRIO_1_SHIFT);
+               dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+       }
+
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) {
+                       /* Map it to next empty TC */
+                       dcbcfg->etscfg.prioritytable[i] =
+                                               cee_cfg->oper_num_tc - 1;
+                       dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT;
+               } else {
+                       dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+               }
+       }
+
+       /* CEE PFC data to ETS config */
+       dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
+       dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+
+       i = 0;
+       status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+                 I40E_AQC_CEE_FCOE_STATUS_SHIFT;
+       err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+       sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+       oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+       /* Add FCoE APP if Error is False and Oper/Sync is True */
+       if (!err && sync && oper) {
+               /* FCoE APP */
+               dcbcfg->app[i].priority =
+                       (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
+                        I40E_AQC_CEE_APP_FCOE_SHIFT;
+               dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+               dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+               i++;
+       }
+
+       status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+                 I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+       err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+       sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+       oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+       /* Add iSCSI APP if Error is False and Oper/Sync is True */
+       if (!err && sync && oper) {
+               /* iSCSI APP */
+               dcbcfg->app[i].priority =
+                       (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
+                        I40E_AQC_CEE_APP_ISCSI_SHIFT;
+               dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+               dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+               i++;
+       }
+
+       status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+                 I40E_AQC_CEE_FIP_STATUS_SHIFT;
+       err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+       sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+       oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+       /* Add FIP APP if Error is False and Oper/Sync is True */
+       if (!err && sync && oper) {
+               /* FIP APP */
+               dcbcfg->app[i].priority =
+                       (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
+                        I40E_AQC_CEE_APP_FIP_SHIFT;
+               dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+               dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+               i++;
+       }
+       dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+       i40e_status ret = I40E_SUCCESS;
+
+       /* IEEE mode */
+       hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+       /* Get Local DCB Config */
+       ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+                                    &hw->local_dcbx_config);
+       if (ret)
+               goto out;
+
+       /* Get Remote DCB Config */
+       ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+                                    I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+                                    &hw->remote_dcbx_config);
+       /* Don't treat ENOENT as an error for Remote MIBs */
+       if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+               ret = I40E_SUCCESS;
+
+out:
+       return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
+{
+       i40e_status ret = I40E_SUCCESS;
+       struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+       struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+
+       /* If Firmware version < v4.33 IEEE only */
+       if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+           (hw->aq.fw_maj_ver < 4))
+               return i40e_get_ieee_dcb_config(hw);
+
+       /* If Firmware version == v4.33 use old CEE struct */
+       if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
+               ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
+                                                sizeof(cee_v1_cfg), NULL);
+               if (ret == I40E_SUCCESS) {
+                       /* CEE mode */
+                       hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+                       hw->local_dcbx_config.tlv_status =
+                                       LE16_TO_CPU(cee_v1_cfg.tlv_status);
+                       i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
+                                                 &hw->local_dcbx_config);
+               }
+       } else {
+               ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg,
+                                                sizeof(cee_cfg), NULL);
+               if (ret == I40E_SUCCESS) {
+                       /* CEE mode */
+                       hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+                       hw->local_dcbx_config.tlv_status =
+                                       LE32_TO_CPU(cee_cfg.tlv_status);
+                       i40e_cee_to_dcb_config(&cee_cfg,
+                                              &hw->local_dcbx_config);
+               }
+       }
+
+       /* CEE mode not enabled, try querying IEEE data */
+       if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+               return i40e_get_ieee_dcb_config(hw);
+
+       if (ret != I40E_SUCCESS)
+               goto out;
+
+       /* Get CEE DCB Desired Config */
+       ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+                                    &hw->desired_dcbx_config);
+       if (ret)
+               goto out;
+
+       /* Get Remote DCB Config */
+       ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+                            I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+                            &hw->remote_dcbx_config);
+       /* Don't treat ENOENT as an error for Remote MIBs */
+       if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+               ret = I40E_SUCCESS;
+
+out:
+       return ret;
+}
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+i40e_status i40e_init_dcb(struct i40e_hw *hw)
+{
+       i40e_status ret = I40E_SUCCESS;
+       struct i40e_lldp_variables lldp_cfg;
+       u8 adminstatus = 0;
+
+       if (!hw->func_caps.dcb)
+               return ret;
+
+       /* Read LLDP NVM area */
+       ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+       if (ret)
+               return ret;
+
+       /* Get the LLDP AdminStatus for the current port */
+       adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
+       adminstatus &= 0xF;
+
+       /* LLDP agent disabled */
+       if (!adminstatus) {
+               hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
+               return ret;
+       }
+
+       /* Get DCBX status */
+       ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+       if (ret)
+               return ret;
+
+       /* Check the DCBX Status */
+       switch (hw->dcbx_status) {
+       case I40E_DCBX_STATUS_DONE:
+       case I40E_DCBX_STATUS_IN_PROGRESS:
+               /* Get current DCBX configuration */
+               ret = i40e_get_dcb_config(hw);
+               if (ret)
+                       return ret;
+               break;
+       case I40E_DCBX_STATUS_DISABLED:
+               return ret;
+       case I40E_DCBX_STATUS_NOT_STARTED:
+       case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+       default:
+               break;
+       }
+
+       /* Configure the LLDP MIB change event */
+       ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+       if (ret)
+               return ret;
+
+       return ret;
+}
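The NVM AdminStatus word read above packs one 4-bit LLDP status per port, so port N's status is (word >> (N * 4)) & 0xF, with the meanings given by the I40E_LLDP_ADMINSTATUS_* defines in i40e_dcb.h. A quick sketch with an assumed NVM word:

    #include <stdio.h>

    int main(void)
    {
            unsigned int adminstatus_word = 0x3230; /* sample NVM value */
            unsigned int port = 1;
            unsigned int adminstatus = (adminstatus_word >> (port * 4)) & 0xF;

            /* 3 == LLDP agent enabled for both RX and TX */
            printf("port %u adminstatus=%u\n", port, adminstatus);
            return 0;
    }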
+
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+                                 struct i40e_dcbx_config *dcbcfg)
+{
+       u8 priority0, priority1, maxtcwilling = 0;
+       struct i40e_dcb_ets_config *etscfg;
+       u16 offset = 0, typelength, i;
+       u8 *buf = tlv->tlvinfo;
+       u32 ouisubtype;
+
+       typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+                       I40E_IEEE_ETS_TLV_LENGTH);
+       tlv->typelength = htons(typelength);
+
+       ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+                       I40E_IEEE_SUBTYPE_ETS_CFG);
+       tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+       /* First Octet post subtype
+        * --------------------------
+        * |will-|CBS  | Re-  | Max |
+        * |ing  |     |served| TCs |
+        * --------------------------
+        * |1bit | 1bit|3 bits|3bits|
+        */
+       etscfg = &dcbcfg->etscfg;
+       if (etscfg->willing)
+               maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+       maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+       buf[offset] = maxtcwilling;
+
+       /* Move offset to Priority Assignment Table */
+       offset++;
+
+       /* Priority Assignment Table (4 octets)
+        * Octets:|    1    |    2    |    3    |    4    |
+        *        -----------------------------------------
+        *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+        *        -----------------------------------------
+        *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+        *        -----------------------------------------
+        */
+       for (i = 0; i < 4; i++) {
+               priority0 = etscfg->prioritytable[i * 2] & 0xF;
+               priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+               buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+                               priority1;
+               offset++;
+       }
+
+       /* TC Bandwidth Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               buf[offset++] = etscfg->tcbwtable[i];
+
+       /* TSA Assignment Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               buf[offset++] = etscfg->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+                                    struct i40e_dcbx_config *dcbcfg)
+{
+       struct i40e_dcb_ets_config *etsrec;
+       u16 offset = 0, typelength, i;
+       u8 priority0, priority1;
+       u8 *buf = tlv->tlvinfo;
+       u32 ouisubtype;
+
+       typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+                       I40E_IEEE_ETS_TLV_LENGTH);
+       tlv->typelength = htons(typelength);
+
+       ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+                       I40E_IEEE_SUBTYPE_ETS_REC);
+       tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+       etsrec = &dcbcfg->etsrec;
+       /* First Octet is reserved */
+       /* Move offset to Priority Assignment Table */
+       offset++;
+
+       /* Priority Assignment Table (4 octets)
+        * Octets:|    1    |    2    |    3    |    4    |
+        *        -----------------------------------------
+        *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+        *        -----------------------------------------
+        *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+        *        -----------------------------------------
+        */
+       for (i = 0; i < 4; i++) {
+               priority0 = etsrec->prioritytable[i * 2] & 0xF;
+               priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+               buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+                               priority1;
+               offset++;
+       }
+
+       /* TC Bandwidth Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               buf[offset++] = etsrec->tcbwtable[i];
+
+       /* TSA Assignment Table (8 octets)
+        * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+        *        ---------------------------------
+        *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+        *        ---------------------------------
+        */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               buf[offset++] = etsrec->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+                                 struct i40e_dcbx_config *dcbcfg)
+{
+       u8 *buf = tlv->tlvinfo;
+       u32 ouisubtype;
+       u16 typelength;
+
+       typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+                       I40E_IEEE_PFC_TLV_LENGTH);
+       tlv->typelength = htons(typelength);
+
+       ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+                       I40E_IEEE_SUBTYPE_PFC_CFG);
+       tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+       /* ----------------------------------------
+        * |will-|MBC  | Re-  | PFC |  PFC Enable  |
+        * |ing  |     |served| cap |              |
+        * -----------------------------------------
+        * |1bit | 1bit|2 bits|4bits| 1 octet      |
+        */
+       if (dcbcfg->pfc.willing)
+               buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+       if (dcbcfg->pfc.mbc)
+               buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+       buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+       buf[1] = dcbcfg->pfc.pfcenable;
+}
+
+/**
+ * i40e_add_ieee_app_pri_tlv -  Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+                                     struct i40e_dcbx_config *dcbcfg)
+{
+       u16 typelength, length, offset = 0;
+       u8 priority, selector, i = 0;
+       u8 *buf = tlv->tlvinfo;
+       u32 ouisubtype;
+
+       /* No APP TLVs then just return */
+       if (dcbcfg->numapps == 0)
+               return;
+       ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+                       I40E_IEEE_SUBTYPE_APP_PRI);
+       tlv->ouisubtype = I40E_HTONL(ouisubtype);
+
+       /* Move offset to App Priority Table */
+       offset++;
+       /* Application Priority Table (3 octets)
+        * Octets:|         1          |    2    |    3    |
+        *        -----------------------------------------
+        *        |Priority|Rsrvd| Sel |    Protocol ID    |
+        *        -----------------------------------------
+        *   Bits:|23    21|20 19|18 16|15                0|
+        *        -----------------------------------------
+        */
+       while (i < dcbcfg->numapps) {
+               priority = dcbcfg->app[i].priority & 0x7;
+               selector = dcbcfg->app[i].selector & 0x7;
+               buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+               buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+               buf[offset + 2] =  dcbcfg->app[i].protocolid & 0xFF;
+               /* Move to next app */
+               offset += 3;
+               i++;
+               if (i >= I40E_DCBX_MAX_APPS)
+                       break;
+       }
+       /* length includes size of ouisubtype + 1 reserved + 3*numapps */
+       length = sizeof(tlv->ouisubtype) + 1 + (i*3);
+       typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+               (length & 0x1FF));
+       tlv->typelength = htons(typelength);
+}
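The length written into typelength above is the 4-octet ouisubtype plus one reserved octet plus three octets per application, kept within the 9-bit length field by the 0x1FF mask. A trivial worked check with a sample app count:

    #include <stdio.h>

    int main(void)
    {
            unsigned int numapps = 2;               /* sample value */
            unsigned int length = 4 /* ouisubtype */ + 1 + numapps * 3;

            printf("APP TLV length for %u apps: %u octets\n",
                   numapps, length);                /* prints 11 */
            return 0;
    }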
+
+/**
+ * i40e_add_dcb_tlv - Add a single IEEE TLV selected by ID
+ * @tlv: pointer to org tlv
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: TLV ID of the TLV to build
+ *
+ * Fill in the TLV that corresponds to the given TLV ID
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+                            struct i40e_dcbx_config *dcbcfg,
+                            u16 tlvid)
+{
+       switch (tlvid) {
+       case I40E_IEEE_TLV_ID_ETS_CFG:
+               i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+               break;
+       case I40E_IEEE_TLV_ID_ETS_REC:
+               i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+               break;
+       case I40E_IEEE_TLV_ID_PFC_CFG:
+               i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+               break;
+       case I40E_IEEE_TLV_ID_APP_PRI:
+               i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+{
+       i40e_status ret = I40E_SUCCESS;
+       struct i40e_dcbx_config *dcbcfg;
+       struct i40e_virt_mem mem;
+       u8 mib_type, *lldpmib;
+       u16 miblen;
+
+       /* update the hw local config */
+       dcbcfg = &hw->local_dcbx_config;
+       /* Allocate the LLDPDU */
+       ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+       if (ret)
+               return ret;
+
+       mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+       if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+               mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+                           SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+       }
+       lldpmib = (u8 *)mem.va;
+       ret = i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+       ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
+
+       i40e_free_virt_mem(hw, &mem);
+       return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: store for the LLDPDU data
+ * @miblen: store for the returned LLDPDU length
+ * @dcbcfg: DCB configuration to convert
+ *
+ * Build the local LLDP MIB from the DCB configuration
+ **/
+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+                                             struct i40e_dcbx_config *dcbcfg)
+{
+       u16 length, offset = 0, tlvid = I40E_TLV_ID_START;
+       i40e_status ret = I40E_SUCCESS;
+       struct i40e_lldp_org_tlv *tlv;
+       u16 type, typelength;
+
+       tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+       while (1) {
+               i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+               typelength = ntohs(tlv->typelength);
+               type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+                               I40E_LLDP_TLV_TYPE_SHIFT);
+               length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+                               I40E_LLDP_TLV_LEN_SHIFT);
+               if (length)
+                       offset += length + 2;
+               /* END TLV or beyond LLDPDU size */
+               if ((tlvid >= I40E_TLV_ID_END_OF_LLDPPDU) ||
+                   (offset > I40E_LLDPDU_SIZE))
+                       break;
+               /* Move to next TLV */
+               if (length)
+                       tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+                             sizeof(tlv->typelength) + length);
+       }
+       *miblen = offset;
+       return ret;
+}
+
+/**
+ * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
+ * @hw: pointer to the HW structure
+ * @lldp_cfg: pointer to hold lldp configuration variables
+ *
+ * Reads the LLDP configuration data from NVM
+ **/
+i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+                                        struct i40e_lldp_variables *lldp_cfg)
+{
+       i40e_status ret = I40E_SUCCESS;
+       u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR);
+
+       if (!lldp_cfg)
+               return I40E_ERR_PARAM;
+
+       ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+       if (ret != I40E_SUCCESS)
+               goto err_lldp_cfg;
+
+       ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset,
+                              sizeof(struct i40e_lldp_variables),
+                              (u8 *)lldp_cfg,
+                              true, NULL);
+       i40e_release_nvm(hw);
+
+err_lldp_cfg:
+       return ret;
+}
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb.h
new file mode 100644 (file)
index 0000000..0c000c9
--- /dev/null
@@ -0,0 +1,215 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_OFFLOAD_DISABLED     0
+#define I40E_DCBX_OFFLOAD_ENABLED      1
+
+#define I40E_DCBX_STATUS_NOT_STARTED   0
+#define I40E_DCBX_STATUS_IN_PROGRESS   1
+#define I40E_DCBX_STATUS_DONE          2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS        3
+#define I40E_DCBX_STATUS_DISABLED      7
+
+#define I40E_TLV_TYPE_END              0
+#define I40E_TLV_TYPE_ORG              127
+
+#define I40E_IEEE_8021QAZ_OUI          0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG      9
+#define I40E_IEEE_SUBTYPE_ETS_REC      10
+#define I40E_IEEE_SUBTYPE_PFC_CFG      11
+#define I40E_IEEE_SUBTYPE_APP_PRI      12
+
+#define I40E_CEE_DCBX_OUI              0x001b21
+#define I40E_CEE_DCBX_TYPE             2
+
+#define I40E_CEE_SUBTYPE_CTRL          1
+#define I40E_CEE_SUBTYPE_PG_CFG                2
+#define I40E_CEE_SUBTYPE_PFC_CFG       3
+#define I40E_CEE_SUBTYPE_APP_PRI       4
+
+#define I40E_CEE_MAX_FEAT_TYPE         3
+#define I40E_LLDP_ADMINSTATUS_DISABLED         0
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RX       1
+#define I40E_LLDP_ADMINSTATUS_ENABLED_TX       2
+#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX     3
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_TLV_LEN_SHIFT                0
+#define I40E_LLDP_TLV_LEN_MASK         (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT       9
+#define I40E_LLDP_TLV_TYPE_MASK                (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT    0
+#define I40E_LLDP_TLV_SUBTYPE_MASK     (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT                8
+#define I40E_LLDP_TLV_OUI_MASK         (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+
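The ouisubtype dword used throughout these TLVs packs the 24-bit OUI above an 8-bit subtype, which is what the OUI and SUBTYPE mask pairs above pull apart. A standalone round trip using copies of these defines, encoding the IEEE 802.1Qaz OUI with the ETS CFG subtype:

    #include <stdio.h>

    #define LLDP_TLV_SUBTYPE_MASK 0xFFu
    #define LLDP_TLV_OUI_SHIFT    8
    #define LLDP_TLV_OUI_MASK     (0xFFFFFFu << LLDP_TLV_OUI_SHIFT)
    #define IEEE_8021QAZ_OUI      0x0080C2u
    #define IEEE_SUBTYPE_ETS_CFG  9u

    int main(void)
    {
            unsigned int ouisubtype = (IEEE_8021QAZ_OUI << LLDP_TLV_OUI_SHIFT) |
                                      IEEE_SUBTYPE_ETS_CFG;
            unsigned int oui = (ouisubtype & LLDP_TLV_OUI_MASK) >>
                               LLDP_TLV_OUI_SHIFT;
            unsigned int subtype = ouisubtype & LLDP_TLV_SUBTYPE_MASK;

            printf("ouisubtype=0x%08x oui=0x%06x subtype=%u\n",
                   ouisubtype, oui, subtype);  /* 0x0080c209, 0x0080c2, 9 */
            return 0;
    }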
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT      0
+#define I40E_IEEE_ETS_MAXTC_MASK       (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT                6
+#define I40E_IEEE_ETS_CBS_MASK         BIT(I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT    7
+#define I40E_IEEE_ETS_WILLING_MASK     BIT(I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT     0
+#define I40E_IEEE_ETS_PRIO_0_MASK      (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT     4
+#define I40E_IEEE_ETS_PRIO_1_MASK      (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_PRIO_0_SHIFT     0
+#define I40E_CEE_PGID_PRIO_0_MASK      (0xF << I40E_CEE_PGID_PRIO_0_SHIFT)
+#define I40E_CEE_PGID_PRIO_1_SHIFT     4
+#define I40E_CEE_PGID_PRIO_1_MASK      (0xF << I40E_CEE_PGID_PRIO_1_SHIFT)
+#define I40E_CEE_PGID_STRICT           15
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT           0
+#define I40E_IEEE_TSA_CBS              1
+#define I40E_IEEE_TSA_ETS              2
+#define I40E_IEEE_TSA_VENDOR           255
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT                0
+#define I40E_IEEE_PFC_CAP_MASK         (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT                6
+#define I40E_IEEE_PFC_MBC_MASK         BIT(I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT    7
+#define I40E_IEEE_PFC_WILLING_MASK     BIT(I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT                0
+#define I40E_IEEE_APP_SEL_MASK         (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT       5
+#define I40E_IEEE_APP_PRIO_MASK                (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+/* TLV definitions for preparing MIB */
+#define I40E_TLV_ID_CHASSIS_ID         0
+#define I40E_TLV_ID_PORT_ID            1
+#define I40E_TLV_ID_TIME_TO_LIVE       2
+#define I40E_IEEE_TLV_ID_ETS_CFG       3
+#define I40E_IEEE_TLV_ID_ETS_REC       4
+#define I40E_IEEE_TLV_ID_PFC_CFG       5
+#define I40E_IEEE_TLV_ID_APP_PRI       6
+#define I40E_TLV_ID_END_OF_LLDPPDU     7
+#define I40E_TLV_ID_START              I40E_IEEE_TLV_ID_ETS_CFG
+
+#define I40E_IEEE_ETS_TLV_LENGTH       25
+#define I40E_IEEE_PFC_TLV_LENGTH       6
+#define I40E_IEEE_APP_TLV_LENGTH       11
+
+#pragma pack(1)
+
+/* IEEE 802.1AB LLDP TLV structure */
+struct i40e_lldp_generic_tlv {
+       __be16 typelength;
+       u8 tlvinfo[1];
+};
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+       __be16 typelength;
+       __be32 ouisubtype;
+       u8 tlvinfo[1];
+};
+
+struct i40e_cee_tlv_hdr {
+       __be16 typelen;
+       u8 operver;
+       u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+       struct i40e_cee_tlv_hdr hdr;
+       __be32 seqno;
+       __be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+       struct i40e_cee_tlv_hdr hdr;
+       u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK  0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK     0x20
+       u8 subtype;
+       u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+       __be16 protocol;
+       u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK     0x03
+       __be16 lower_oui;
+       u8 prio_map;
+};
+#pragma pack()
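The #pragma pack(1) wrapper matters: without it most ABIs would insert padding and the structs above would no longer overlay the wire format byte-for-byte. A rough standalone check (plain typedefs stand in for the kernel's __be16/u8) using the layout of struct i40e_cee_app_prio, whose packed size must be 2 + 1 + 2 + 1 = 6 octets:

    #include <assert.h>
    #include <stdio.h>

    typedef unsigned short be16;    /* stand-in for the kernel __be16 */
    typedef unsigned char u8;

    #pragma pack(1)
    struct cee_app_prio {           /* mirrors struct i40e_cee_app_prio */
            be16 protocol;
            u8 upper_oui_sel;
            be16 lower_oui;
            u8 prio_map;
    };
    #pragma pack()

    int main(void)
    {
            /* Unpacked, lower_oui would typically be aligned to 2 bytes
             * and the struct padded to 8 bytes.
             */
            assert(sizeof(struct cee_app_prio) == 6);
            printf("wire size: %zu octets\n", sizeof(struct cee_app_prio));
            return 0;
    }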
+
+/*
+ * TODO: The structures below define LLDP/DCBX variables and
+ * statistics, but we still need to determine how to retrieve
+ * the required information from the Firmware before they can be used
+ */
+
+/* IEEE 802.1AB LLDP Agent Statistics */
+struct i40e_lldp_stats {
+       u64 remtablelastchangetime;
+       u64 remtableinserts;
+       u64 remtabledeletes;
+       u64 remtabledrops;
+       u64 remtableageouts;
+       u64 txframestotal;
+       u64 rxframesdiscarded;
+       u64 rxportframeerrors;
+       u64 rxportframestotal;
+       u64 rxporttlvsdiscardedtotal;
+       u64 rxporttlvsunrecognizedtotal;
+       u64 remtoomanyneighbors;
+};
+
+/* IEEE 802.1Qaz DCBX variables */
+struct i40e_dcbx_variables {
+       u32 defmaxtrafficclasses;
+       u32 defprioritytcmapping;
+       u32 deftcbandwidth;
+       u32 deftsaassignment;
+};
+
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
+                                          u16 *status);
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+                                             struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+                                            u8 bridgetype,
+                                            struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw);
+i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+                                             struct i40e_dcbx_config *dcbcfg);
+
+#endif /* _I40E_DCB_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb_nl.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_dcb_nl.c
new file mode 100644 (file)
index 0000000..b5d6f6c
--- /dev/null
@@ -0,0 +1,322 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_DCB
+#include "i40e.h"
+#include <net/dcbnl.h>
+
+#ifdef HAVE_DCBNL_IEEE
+/**
+ * i40e_get_pfc_delay - retrieve PFC Link Delay
+ * @hw: pointer to hardware struct
+ * @delay: holds the PFC Link delay value
+ *
+ * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA
+ **/
+static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
+{
+       u32 val;
+
+       val = rd32(hw, I40E_PRTDCB_GENC);
+       *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
+                      I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+}
+
+/**
+ * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
+ * @dev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Returns local IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_getets(struct net_device *dev,
+                                 struct ieee_ets *ets)
+{
+       struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+       struct i40e_dcbx_config *dcbxcfg;
+       struct i40e_hw *hw = &pf->hw;
+
+       if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+               return -EINVAL;
+
+       dcbxcfg = &hw->local_dcbx_config;
+       ets->willing = dcbxcfg->etscfg.willing;
+       ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+       ets->cbs = dcbxcfg->etscfg.cbs;
+       memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
+               sizeof(ets->tc_tx_bw));
+       memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable,
+               sizeof(ets->tc_rx_bw));
+       memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable,
+               sizeof(ets->tc_tsa));
+       memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable,
+               sizeof(ets->prio_tc));
+       memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
+               sizeof(ets->tc_reco_bw));
+       memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+               sizeof(ets->tc_reco_tsa));
+       memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prioritytable,
+               sizeof(ets->reco_prio_tc));
+
+       return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
+ * @dev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Returns local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
+                                 struct ieee_pfc *pfc)
+{
+       struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+       struct i40e_dcbx_config *dcbxcfg;
+       struct i40e_hw *hw = &pf->hw;
+       int i;
+
+       if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+               return -EINVAL;
+
+       dcbxcfg = &hw->local_dcbx_config;
+       pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+       pfc->pfc_en = dcbxcfg->pfc.pfcenable;
+       pfc->mbc = dcbxcfg->pfc.mbc;
+       i40e_get_pfc_delay(hw, &pfc->delay);
+
+       /* Get Requests/Indications */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+               pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_dcbnl_getdcbx - retrieve current DCBx capability
+ * @dev: the corresponding netdev
+ *
+ * Returns DCBx capability features
+ **/
+static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
+{
+       struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+       return pf->dcbx_cap;
+}
+
+/**
+ * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
+ * @dev: the corresponding netdev
+ * @perm_addr: buffer to hold the returned MAC addresses
+ *
+ * Returns the SAN MAC address used for LLDP exchange
+ **/
+static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
+                                       u8 *perm_addr)
+{
+       struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+       int i, j;
+
+       memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+       for (i = 0; i < dev->addr_len; i++)
+               perm_addr[i] = pf->hw.mac.perm_addr[i];
+
+       for (j = 0; j < dev->addr_len; j++, i++)
+               perm_addr[i] = pf->hw.mac.san_addr[j];
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+       .ieee_getets    = i40e_dcbnl_ieee_getets,
+       .ieee_getpfc    = i40e_dcbnl_ieee_getpfc,
+       .getdcbx        = i40e_dcbnl_getdcbx,
+       .getpermhwaddr  = i40e_dcbnl_get_perm_hw_addr,
+};
+
+/**
+ * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config
+ * @vsi: the corresponding vsi
+ *
+ * Set up all the IEEE APPs in the DCBNL App Table and generate an event
+ * for other settings
+ **/
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
+{
+       struct net_device *dev = vsi->netdev;
+       struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+       struct i40e_dcbx_config *dcbxcfg;
+       struct i40e_hw *hw = &pf->hw;
+       struct dcb_app sapp;
+       u8 prio, tc_map;
+       int i;
+
+       /* DCB not enabled */
+       if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+               return;
+
+       /* MFP mode but not an iSCSI PF so return */
+       if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+               return;
+
+       dcbxcfg = &hw->local_dcbx_config;
+
+       /* Set up all the App TLVs if DCBx is negotiated */
+       for (i = 0; i < dcbxcfg->numapps; i++) {
+               prio = dcbxcfg->app[i].priority;
+               tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
+
+               /* Add APP only if the TC is enabled for this VSI */
+               if (tc_map & vsi->tc_config.enabled_tc) {
+                       sapp.selector = dcbxcfg->app[i].selector;
+                       sapp.protocol = dcbxcfg->app[i].protocolid;
+                       sapp.priority = prio;
+                       dcb_ieee_setapp(dev, &sapp);
+               }
+       }
+
+       /* Notify user-space of the changes */
+       dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+}
+
+/**
+ * i40e_dcbnl_vsi_del_app - Delete APP for given VSI
+ * @vsi: the corresponding vsi
+ * @app: APP to delete
+ *
+ * Delete given APP from the DCBNL APP table for given
+ * VSI
+ **/
+static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
+                                 struct i40e_dcb_app_priority_table *app)
+{
+       struct net_device *dev = vsi->netdev;
+       struct dcb_app sapp;
+
+       if (!dev)
+               return -EINVAL;
+
+       sapp.selector = app->selector;
+       sapp.protocol = app->protocolid;
+       sapp.priority = app->priority;
+       return dcb_ieee_delapp(dev, &sapp);
+}
+
+/**
+ * i40e_dcbnl_del_app - Delete APP on all VSIs
+ * @pf: the corresponding PF
+ * @app: APP to delete
+ *
+ * Delete given APP from all the VSIs for given PF
+ **/
+static void i40e_dcbnl_del_app(struct i40e_pf *pf,
+                             struct i40e_dcb_app_priority_table *app)
+{
+       int v, err;
+
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+               if (pf->vsi[v] && pf->vsi[v]->netdev) {
+                       err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+                       dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+                               pf->vsi[v]->seid, err, app->selector,
+                               app->protocolid, app->priority);
+               }
+       }
+}
+
+/**
+ * i40e_dcbnl_find_app - Search APP in given DCB config
+ * @cfg: DCBX configuration data
+ * @app: APP to search for
+ *
+ * Find given APP in the DCB configuration
+ **/
+static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
+                               struct i40e_dcb_app_priority_table *app)
+{
+       int i;
+
+       for (i = 0; i < cfg->numapps; i++) {
+               if (app->selector == cfg->app[i].selector &&
+                   app->protocolid == cfg->app[i].protocolid &&
+                   app->priority == cfg->app[i].priority)
+                       return true;
+       }
+
+       return false;
+}
+
+/**
+ * i40e_dcbnl_flush_apps - Delete all removed APPs
+ * @pf: the corresponding PF
+ * @old_cfg: old DCBX configuration data
+ * @new_cfg: new DCBX configuration data
+ *
+ * Find and delete all APPs that are not present in the passed
+ * DCB configuration
+ **/
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+                          struct i40e_dcbx_config *old_cfg,
+                          struct i40e_dcbx_config *new_cfg)
+{
+       struct i40e_dcb_app_priority_table app;
+       int i;
+
+       /* MFP mode but not an iSCSI PF so return */
+       if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+               return;
+
+       for (i = 0; i < old_cfg->numapps; i++) {
+               app = old_cfg->app[i];
+               /* The APP is not available anymore; delete it */
+               if (!i40e_dcbnl_find_app(new_cfg, &app))
+                       i40e_dcbnl_del_app(pf, &app);
+       }
+}
+
+/**
+ * i40e_dcbnl_setup - DCBNL setup
+ * @vsi: the corresponding vsi
+ *
+ * Set up DCBNL ops and initial APP TLVs
+ **/
+void i40e_dcbnl_setup(struct i40e_vsi *vsi)
+{
+       struct net_device *dev = vsi->netdev;
+       struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+       /* Not DCB capable */
+       if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+               return;
+
+       dev->dcbnl_ops = &dcbnl_ops;
+
+       /* Set initial IEEE DCB settings */
+       i40e_dcbnl_set_all(vsi);
+}
+#endif /* HAVE_DCBNL_IEEE */
+#endif /* CONFIG_DCB */
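The getters above are reached through the kernel dcbnl core once i40e_dcbnl_setup() installs dev->dcbnl_ops. A hedged sketch of that dispatch; struct net_device and struct ieee_ets come from <linux/netdevice.h> and <net/dcbnl.h>, while the wrapper itself is illustrative:

static int example_query_ets(struct net_device *dev, struct ieee_ets *ets)
{
        /* the dcbnl core checks the ops table before calling the driver */
        if (!dev->dcbnl_ops || !dev->dcbnl_ops->ieee_getets)
                return -EOPNOTSUPP;

        /* for this driver, resolves to i40e_dcbnl_ieee_getets() above */
        return dev->dcbnl_ops->ieee_getets(dev, ets);
}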
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_debugfs.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_debugfs.c
new file mode 100644 (file)
index 0000000..54b5d05
--- /dev/null
@@ -0,0 +1,2618 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+
+#include "i40e.h"
+
+static struct dentry *i40e_dbg_root;
+
+/**
+ * i40e_dbg_find_vsi - searches for the vsi with the given seid
+ * @pf: the PF structure to search for the vsi
+ * @seid: seid of the vsi to search for
+ **/
+static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
+{
+       int i;
+
+       if (seid < 0)
+               dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+       else
+               for (i = 0; i < pf->num_alloc_vsi; i++)
+                       if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
+                               return pf->vsi[i];
+
+       return NULL;
+}
+
+/**
+ * i40e_dbg_find_veb - searches for the veb with the given seid
+ * @pf: the PF structure to search for the veb
+ * @seid: seid of the veb to search for
+ **/
+static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
+{
+       int i;
+
+       if ((seid < I40E_BASE_VEB_SEID) ||
+           (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB)))
+               dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+       else
+               for (i = 0; i < I40E_MAX_VEB; i++)
+                       if (pf->veb[i] && pf->veb[i]->seid == seid)
+                               return pf->veb[i];
+       return NULL;
+}
+
+/**************************************************************
+ * dump
+ * The dump entry in debugfs is for getting a data snapshot of
+ * the driver's current configuration and runtime details.
+ * When the filesystem entry is written, a snapshot is taken.
+ * When the entry is read, the most recent snapshot data is dumped.
+ **************************************************************/
+static char *i40e_dbg_dump_buf;
+static ssize_t i40e_dbg_dump_data_len;
+static ssize_t i40e_dbg_dump_buffer_len;
+
+/**
+ * i40e_dbg_dump_read - read the dump data
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
+                                 size_t count, loff_t *ppos)
+{
+       int bytes_not_copied;
+       int len;
+
+       /* is *ppos bigger than the available data? */
+       if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf)
+               return 0;
+
+       /* be sure to not read beyond the end of available data */
+       len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos));
+
+       bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len);
+       /* copy_to_user() returns the number of bytes it could NOT copy */
+       if (bytes_not_copied)
+               return -EFAULT;
+
+       *ppos += len;
+       return len;
+}
+
+/**
+ * i40e_dbg_prep_dump_buf
+ * @pf: the PF we're working with
+ * @buflen: the desired buffer length
+ *
+ * Returns a positive value on success, 0 on failure
+ **/
+static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen)
+{
+       /* if not already big enough, prep for realloc */
+       if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) {
+               kfree(i40e_dbg_dump_buf);
+               i40e_dbg_dump_buffer_len = 0;
+               i40e_dbg_dump_buf = NULL;
+       }
+
+       /* get a new buffer if needed */
+       if (!i40e_dbg_dump_buf) {
+               i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL);
+               if (i40e_dbg_dump_buf != NULL)
+                       i40e_dbg_dump_buffer_len = buflen;
+       }
+
+       return i40e_dbg_dump_buffer_len;
+}
+
+/**
+ * i40e_dbg_dump_write - trigger a datadump snapshot
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ *
+ * Any write clears the stats
+ **/
+static ssize_t i40e_dbg_dump_write(struct file *filp,
+                                  const char __user *buffer,
+                                  size_t count, loff_t *ppos)
+{
+       struct i40e_pf *pf = filp->private_data;
+       bool seid_found = false;
+       long seid = -1;
+       int buflen = 0;
+       int i, ret;
+       int len;
+       u8 *p;
+
+       /* don't allow partial writes */
+       if (*ppos != 0)
+               return 0;
+
+       /* decode the SEID given to be dumped */
+       ret = kstrtol_from_user(buffer, count, 0, &seid);
+
+       if (ret) {
+               dev_info(&pf->pdev->dev, "bad seid value\n");
+       } else if (seid == 0) {
+               seid_found = true;
+
+               kfree(i40e_dbg_dump_buf);
+               i40e_dbg_dump_buffer_len = 0;
+               i40e_dbg_dump_data_len = 0;
+               i40e_dbg_dump_buf = NULL;
+               dev_info(&pf->pdev->dev, "debug buffer freed\n");
+
+       } else if (seid == pf->pf_seid || seid == 1) {
+               seid_found = true;
+
+               buflen = sizeof(struct i40e_pf);
+               buflen += (sizeof(struct i40e_aq_desc)
+                    * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries));
+
+               if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+                       p = i40e_dbg_dump_buf;
+
+                       len = sizeof(struct i40e_pf);
+                       memcpy(p, pf, len);
+                       p += len;
+
+                       len = (sizeof(struct i40e_aq_desc)
+                                       * pf->hw.aq.num_asq_entries);
+                       memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
+                       p += len;
+
+                       len = (sizeof(struct i40e_aq_desc)
+                                       * pf->hw.aq.num_arq_entries);
+                       memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
+                       p += len;
+
+                       i40e_dbg_dump_data_len = buflen;
+                       dev_info(&pf->pdev->dev,
+                                "PF seid %ld dumped %d bytes\n",
+                                seid, (int)i40e_dbg_dump_data_len);
+               }
+       } else if (seid >= I40E_BASE_VSI_SEID) {
+               struct i40e_vsi *vsi = NULL;
+               struct i40e_mac_filter *f;
+               int filter_count = 0;
+
+               mutex_lock(&pf->switch_mutex);
+               vsi = i40e_dbg_find_vsi(pf, seid);
+               if (!vsi) {
+                       mutex_unlock(&pf->switch_mutex);
+                       goto write_exit;
+               }
+
+               buflen = sizeof(struct i40e_vsi);
+               buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors;
+               buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs;
+               buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs;
+               buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs;
+               list_for_each_entry(f, &vsi->mac_filter_list, list)
+                       filter_count++;
+               buflen += sizeof(struct i40e_mac_filter) * filter_count;
+
+               if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+                       p = i40e_dbg_dump_buf;
+                       seid_found = true;
+
+                       len = sizeof(struct i40e_vsi);
+                       memcpy(p, vsi, len);
+                       p += len;
+
+                       if (vsi->num_q_vectors) {
+                               len = (sizeof(struct i40e_q_vector)
+                                       * vsi->num_q_vectors);
+                               memcpy(p, vsi->q_vectors, len);
+                               p += len;
+                       }
+
+                       if (vsi->num_queue_pairs) {
+                               len = (sizeof(struct i40e_ring) *
+                                     vsi->num_queue_pairs);
+                               memcpy(p, vsi->tx_rings, len);
+                               p += len;
+                               memcpy(p, vsi->rx_rings, len);
+                               p += len;
+                       }
+
+                       if (vsi->tx_rings[0]) {
+                               len = sizeof(struct i40e_tx_buffer);
+                               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                                       memcpy(p, vsi->tx_rings[i]->tx_bi, len);
+                                       p += len;
+                               }
+                               len = sizeof(struct i40e_rx_buffer);
+                               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                                       memcpy(p, vsi->rx_rings[i]->rx_bi, len);
+                                       p += len;
+                               }
+                       }
+
+                       /* macvlan filter list */
+                       len = sizeof(struct i40e_mac_filter);
+                       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+                               memcpy(p, f, len);
+                               p += len;
+                       }
+
+                       i40e_dbg_dump_data_len = buflen;
+                       dev_info(&pf->pdev->dev,
+                                "VSI seid %ld dumped %d bytes\n",
+                                seid, (int)i40e_dbg_dump_data_len);
+               }
+               mutex_unlock(&pf->switch_mutex);
+       } else if (seid >= I40E_BASE_VEB_SEID) {
+               struct i40e_veb *veb = NULL;
+
+               mutex_lock(&pf->switch_mutex);
+               veb = i40e_dbg_find_veb(pf, seid);
+               if (!veb) {
+                       mutex_unlock(&pf->switch_mutex);
+                       goto write_exit;
+               }
+
+               buflen = sizeof(struct i40e_veb);
+               if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+                       seid_found = true;
+                       memcpy(i40e_dbg_dump_buf, veb, buflen);
+                       i40e_dbg_dump_data_len = buflen;
+                       dev_info(&pf->pdev->dev,
+                                "VEB seid %ld dumped %d bytes\n",
+                                seid, (int)i40e_dbg_dump_data_len);
+               }
+               mutex_unlock(&pf->switch_mutex);
+       }
+
+write_exit:
+       if (!seid_found)
+               dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid);
+
+       return count;
+}
+
+static const struct file_operations i40e_dbg_dump_fops = {
+       .owner = THIS_MODULE,
+       .open =  simple_open,
+       .read =  i40e_dbg_dump_read,
+       .write = i40e_dbg_dump_write,
+};
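To exercise this entry, write a SEID to take a snapshot and then read the raw bytes back. A user-space sketch; the debugfs path is an assumption (it depends on where i40e_dbg_root is created and on the device's PCI address):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        /* path assumed; "1" requests a PF snapshot, per i40e_dbg_dump_write() */
        int fd = open("/sys/kernel/debug/i40e/0000:01:00.0/dump", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        write(fd, "1", 1);
        lseek(fd, 0, SEEK_SET);
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);      /* raw snapshot bytes */
        close(fd);
        return 0;
}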
+
+/**************************************************************
+ * command
+ * The command entry in debugfs is for giving the driver commands
+ * to be executed - these may be for changing the internal switch
+ * setup, adding or removing filters, or other things.  Many of
+ * these will be useful for some forms of unit testing.
+ **************************************************************/
+static char i40e_dbg_command_buf[256] = "";
+
+/**
+ * i40e_dbg_command_read - read for command datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
+                                    size_t count, loff_t *ppos)
+{
+       struct i40e_pf *pf = filp->private_data;
+       int bytes_not_copied;
+       int buf_size = 256;
+       char *buf;
+       int len;
+
+       /* don't allow partial reads */
+       if (*ppos != 0)
+               return 0;
+       if (count < buf_size)
+               return -ENOSPC;
+
+       buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       len = snprintf(buf, buf_size, "%s: %s\n",
+                      pf->vsi[pf->lan_vsi]->netdev->name,
+                      i40e_dbg_command_buf);
+
+       bytes_not_copied = copy_to_user(buffer, buf, len);
+       kfree(buf);
+
+       if (bytes_not_copied)
+               return -EFAULT;
+
+       *ppos = len;
+       return len;
+}
+
+/**
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
+{
+#ifdef HAVE_NDO_GET_STATS64
+       struct rtnl_link_stats64 *nstat;
+#else
+       struct net_device_stats *nstat;
+#endif
+       struct i40e_mac_filter *f;
+       struct i40e_vsi *vsi;
+       int i;
+
+       vsi = i40e_dbg_find_vsi(pf, seid);
+       if (!vsi) {
+               dev_info(&pf->pdev->dev,
+                        "dump %d: seid not found\n", seid);
+               return;
+       }
+       dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
+       if (vsi->netdev)
+               dev_info(&pf->pdev->dev,
+                        "    netdev: name = %s\n",
+                        vsi->netdev->name);
+#ifdef HAVE_VLAN_RX_REGISTER
+       if (vsi->vlgrp)
+               dev_info(&pf->pdev->dev,
+                        "    vlgrp: & = %p\n", vsi->vlgrp);
+#else
+       if (vsi->active_vlans)
+               dev_info(&pf->pdev->dev,
+                        "    vlgrp: & = %p\n", vsi->active_vlans);
+#endif /* HAVE_VLAN_RX_REGISTER */
+       dev_info(&pf->pdev->dev,
+                "    netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li, flags = 0x%08lx\n",
+                vsi->netdev_registered,
+                vsi->current_netdev_flags, vsi->state, vsi->flags);
+       if (vsi == pf->vsi[pf->lan_vsi])
+               dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+                        pf->hw.mac.addr,
+                        pf->hw.mac.san_addr,
+                        pf->hw.mac.port_addr);
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               dev_info(&pf->pdev->dev,
+                        "    mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
+                        f->macaddr, f->vlan, f->is_netdev, f->is_vf,
+                        f->counter);
+       }
+       nstat = i40e_get_vsi_stats_struct(vsi);
+       dev_info(&pf->pdev->dev,
+                "    net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+                (unsigned long int)nstat->rx_packets,
+                (unsigned long int)nstat->rx_bytes,
+                (unsigned long int)nstat->rx_errors,
+                (unsigned long int)nstat->rx_dropped);
+       dev_info(&pf->pdev->dev,
+                "    net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+                (unsigned long int)nstat->tx_packets,
+                (unsigned long int)nstat->tx_bytes,
+                (unsigned long int)nstat->tx_errors,
+                (unsigned long int)nstat->tx_dropped);
+       dev_info(&pf->pdev->dev,
+                "    net_stats: multicast = %lu, collisions = %lu\n",
+                (unsigned long int)nstat->multicast,
+                (unsigned long int)nstat->collisions);
+       dev_info(&pf->pdev->dev,
+                "    net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+                (unsigned long int)nstat->rx_length_errors,
+                (unsigned long int)nstat->rx_over_errors,
+                (unsigned long int)nstat->rx_crc_errors);
+       dev_info(&pf->pdev->dev,
+                "    net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+                (unsigned long int)nstat->rx_frame_errors,
+                (unsigned long int)nstat->rx_fifo_errors,
+                (unsigned long int)nstat->rx_missed_errors);
+       dev_info(&pf->pdev->dev,
+                "    net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+                (unsigned long int)nstat->tx_aborted_errors,
+                (unsigned long int)nstat->tx_carrier_errors,
+                (unsigned long int)nstat->tx_fifo_errors);
+       dev_info(&pf->pdev->dev,
+                "    net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+                (unsigned long int)nstat->tx_heartbeat_errors,
+                (unsigned long int)nstat->tx_window_errors);
+       dev_info(&pf->pdev->dev,
+                "    net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
+                (unsigned long int)nstat->rx_compressed,
+                (unsigned long int)nstat->tx_compressed);
+       dev_info(&pf->pdev->dev,
+                "    net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+                (unsigned long int)vsi->net_stats_offsets.rx_packets,
+                (unsigned long int)vsi->net_stats_offsets.rx_bytes,
+                (unsigned long int)vsi->net_stats_offsets.rx_errors,
+                (unsigned long int)vsi->net_stats_offsets.rx_dropped);
+       dev_info(&pf->pdev->dev,
+                "    net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+                (unsigned long int)vsi->net_stats_offsets.tx_packets,
+                (unsigned long int)vsi->net_stats_offsets.tx_bytes,
+                (unsigned long int)vsi->net_stats_offsets.tx_errors,
+                (unsigned long int)vsi->net_stats_offsets.tx_dropped);
+       dev_info(&pf->pdev->dev,
+                "    net_stats_offsets: multicast = %lu, collisions = %lu\n",
+                (unsigned long int)vsi->net_stats_offsets.multicast,
+                (unsigned long int)vsi->net_stats_offsets.collisions);
+       dev_info(&pf->pdev->dev,
+                "    net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+                (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
+                (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
+                (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
+       dev_info(&pf->pdev->dev,
+                "    net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+                (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
+                (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
+                (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
+       dev_info(&pf->pdev->dev,
+                "    net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+                (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
+                (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
+                (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
+       dev_info(&pf->pdev->dev,
+                "    net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+                (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
+                (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
+       dev_info(&pf->pdev->dev,
+                "    net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
+                (unsigned long int)vsi->net_stats_offsets.rx_compressed,
+                (unsigned long int)vsi->net_stats_offsets.tx_compressed);
+       dev_info(&pf->pdev->dev,
+                "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+                vsi->tx_restart, vsi->tx_busy,
+                vsi->rx_buf_failed, vsi->rx_page_failed);
+       rcu_read_lock();
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+
+               if (!rx_ring)
+                       continue;
+
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: desc = %p\n",
+                        i, rx_ring->desc);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+                        i, rx_ring->dev,
+                        rx_ring->netdev,
+                        rx_ring->rx_bi);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+                        i, rx_ring->state,
+                        rx_ring->queue_index,
+                        rx_ring->reg_idx);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+                        i, rx_ring->rx_hdr_len,
+                        rx_ring->rx_buf_len,
+                        rx_ring->dtype);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+                        i, rx_ring->hsplit,
+                        rx_ring->next_to_use,
+                        rx_ring->next_to_clean,
+                        rx_ring->ring_active);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+                        i, rx_ring->stats.packets,
+                        rx_ring->stats.bytes,
+                        rx_ring->rx_stats.non_eop_descs);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
+                        i,
+                        rx_ring->rx_stats.alloc_page_failed,
+                        rx_ring->rx_stats.alloc_buff_failed);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+                        i, rx_ring->size,
+                        (unsigned long int)rx_ring->dma);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
+                        i, rx_ring->vsi,
+                        rx_ring->q_vector);
+       }
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+
+               if (!tx_ring)
+                       continue;
+
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: desc = %p\n",
+                        i, tx_ring->desc);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+                        i, tx_ring->dev,
+                        tx_ring->netdev,
+                        tx_ring->tx_bi);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+                        i, tx_ring->state,
+                        tx_ring->queue_index,
+                        tx_ring->reg_idx);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: dtype = %d\n",
+                        i, tx_ring->dtype);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+                        i, tx_ring->hsplit,
+                        tx_ring->next_to_use,
+                        tx_ring->next_to_clean,
+                        tx_ring->ring_active);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+                        i, tx_ring->stats.packets,
+                        tx_ring->stats.bytes,
+                        tx_ring->tx_stats.restart_queue);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+                        i,
+                        tx_ring->tx_stats.tx_busy,
+                        tx_ring->tx_stats.tx_done_old);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+                        i, tx_ring->size,
+                        (unsigned long int)tx_ring->dma);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
+                        i, tx_ring->vsi,
+                        tx_ring->q_vector);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: DCB tc = %d\n",
+                        i, tx_ring->dcb_tc);
+       }
+       rcu_read_unlock();
+       dev_info(&pf->pdev->dev,
+                "    work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
+                vsi->work_limit, vsi->rx_itr_setting,
+                ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed",
+                vsi->tx_itr_setting,
+                ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed");
+       dev_info(&pf->pdev->dev,
+                "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
+                vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+       dev_info(&pf->pdev->dev,
+                "    num_q_vectors = %i, base_vector = %i\n",
+                vsi->num_q_vectors, vsi->base_vector);
+       dev_info(&pf->pdev->dev,
+                "    seid = %d, id = %d, uplink_seid = %d\n",
+                vsi->seid, vsi->id, vsi->uplink_seid);
+       dev_info(&pf->pdev->dev,
+                "    base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
+                vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
+       dev_info(&pf->pdev->dev, "    type = %i\n", vsi->type);
+       if (vsi->type == I40E_VSI_SRIOV)
+               dev_info(&pf->pdev->dev, "    VF ID = %i\n", vsi->vf_id);
+       dev_info(&pf->pdev->dev,
+                "    info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
+                vsi->info.valid_sections, vsi->info.switch_id);
+       dev_info(&pf->pdev->dev,
+                "    info: sw_reserved[] = 0x%02x 0x%02x\n",
+                vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
+       dev_info(&pf->pdev->dev,
+                "    info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
+                vsi->info.sec_flags, vsi->info.sec_reserved);
+       dev_info(&pf->pdev->dev,
+                "    info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
+                vsi->info.pvid, vsi->info.fcoe_pvid,
+                vsi->info.port_vlan_flags);
+       dev_info(&pf->pdev->dev,
+                "    info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
+                vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
+                vsi->info.pvlan_reserved[2]);
+       dev_info(&pf->pdev->dev,
+                "    info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
+                vsi->info.ingress_table, vsi->info.egress_table);
+       dev_info(&pf->pdev->dev,
+                "    info: cas_pv_stag = 0x%04x, cas_pv_flags = 0x%02x, cas_pv_reserved = 0x%02x\n",
+                vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
+                vsi->info.cas_pv_reserved);
+       dev_info(&pf->pdev->dev,
+                "    info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+                vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
+                vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
+                vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
+                vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
+       dev_info(&pf->pdev->dev,
+                "    info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+                vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
+                vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
+                vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
+                vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
+       dev_info(&pf->pdev->dev,
+                "    info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+                vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
+                vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
+                vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
+                vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
+       dev_info(&pf->pdev->dev,
+                "    info: queueing_opt_flags = 0x%02x  queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
+                vsi->info.queueing_opt_flags,
+                vsi->info.queueing_opt_reserved[0],
+                vsi->info.queueing_opt_reserved[1],
+                vsi->info.queueing_opt_reserved[2]);
+       dev_info(&pf->pdev->dev,
+                "    info: up_enable_bits = 0x%02x\n",
+                vsi->info.up_enable_bits);
+       dev_info(&pf->pdev->dev,
+                "    info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
+                vsi->info.sched_reserved, vsi->info.outer_up_table);
+       dev_info(&pf->pdev->dev,
+                "    info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+                vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
+                vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
+                vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
+                vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
+       dev_info(&pf->pdev->dev,
+                "    info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+                vsi->info.qs_handle[0], vsi->info.qs_handle[1],
+                vsi->info.qs_handle[2], vsi->info.qs_handle[3],
+                vsi->info.qs_handle[4], vsi->info.qs_handle[5],
+                vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
+       dev_info(&pf->pdev->dev,
+                "    info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
+                vsi->info.stat_counter_idx, vsi->info.sched_id);
+       dev_info(&pf->pdev->dev,
+                "    info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+                vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
+                vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
+                vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
+                vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
+                vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
+                vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
+       if (vsi->back)
+               dev_info(&pf->pdev->dev, "    PF = %p\n", vsi->back);
+       dev_info(&pf->pdev->dev, "    idx = %d\n", vsi->idx);
+       dev_info(&pf->pdev->dev,
+                "    tc_config: numtc = %d, enabled_tc = 0x%x\n",
+                vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev,
+                        "    tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
+                        i, vsi->tc_config.tc_info[i].qoffset,
+                        vsi->tc_config.tc_info[i].qcount,
+                        vsi->tc_config.tc_info[i].netdev_tc);
+       }
+       dev_info(&pf->pdev->dev,
+                "    bw: bw_limit = %d, bw_max_quanta = %d\n",
+                vsi->bw_limit, vsi->bw_max_quanta);
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev,
+                        "    bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
+                        i, vsi->bw_ets_share_credits[i],
+                        vsi->bw_ets_limit_credits[i],
+                        vsi->bw_ets_max_quanta[i]);
+       }
+#ifdef I40E_FCOE
+       if (vsi->type == I40E_VSI_FCOE) {
+               dev_info(&pf->pdev->dev,
+                        "    fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n",
+                        vsi->fcoe_stats.rx_fcoe_packets,
+                        vsi->fcoe_stats.rx_fcoe_dwords,
+                        vsi->fcoe_stats.rx_fcoe_dropped);
+               dev_info(&pf->pdev->dev,
+                        "    fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n",
+                        vsi->fcoe_stats.tx_fcoe_packets,
+                        vsi->fcoe_stats.tx_fcoe_dwords);
+               dev_info(&pf->pdev->dev,
+                        "    fcoe_stats: bad_crc = %llu, last_error = %llu\n",
+                        vsi->fcoe_stats.fcoe_bad_fccrc,
+                        vsi->fcoe_stats.fcoe_last_error);
+               dev_info(&pf->pdev->dev, "    fcoe_stats: ddp_count = %llu\n",
+                        vsi->fcoe_stats.fcoe_ddp_count);
+       }
+#endif
+}
+
+/**
+ * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
+{
+       struct i40e_adminq_ring *ring;
+       struct i40e_hw *hw = &pf->hw;
+       char hdr[32];
+       int i;
+
+       snprintf(hdr, sizeof(hdr), "%s %s:         ",
+                dev_driver_string(&pf->pdev->dev),
+                dev_name(&pf->pdev->dev));
+
+       /* first the send (command) ring, then the receive (event) ring */
+       dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
+       ring = &(hw->aq.asq);
+       for (i = 0; i < ring->count; i++) {
+               struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
+               dev_info(&pf->pdev->dev,
+                        "   at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+                        i, d->flags, d->opcode, d->datalen, d->retval,
+                        d->cookie_high, d->cookie_low);
+               print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+                              16, 1, d->params.raw, 16, 0);
+       }
+
+       dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
+       ring = &(hw->aq.arq);
+       for (i = 0; i < ring->count; i++) {
+               struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
+               dev_info(&pf->pdev->dev,
+                        "   ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+                        i, d->flags, d->opcode, d->datalen, d->retval,
+                        d->cookie_high, d->cookie_low);
+               print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+                              16, 1, d->params.raw, 16, 0);
+       }
+}
+
+/**
+ * i40e_dbg_dump_desc - handles dump desc write into command datum
+ * @cnt: number of arguments that the user supplied
+ * @vsi_seid: vsi id entered by user
+ * @ring_id: ring id entered by user
+ * @desc_n: descriptor number entered by user
+ * @pf: the i40e_pf created in command write
+ * @is_rx_ring: true if rx, false if tx
+ **/
+static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
+                              struct i40e_pf *pf, bool is_rx_ring)
+{
+       struct i40e_tx_desc *txd;
+       union i40e_rx_desc *rxd;
+       struct i40e_ring ring;
+       struct i40e_vsi *vsi;
+       int i;
+
+       vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+       if (!vsi) {
+               dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
+               return;
+       }
+       if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
+               dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
+               return;
+       }
+       if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
+               dev_info(&pf->pdev->dev,
+                        "descriptor rings have not been allocated for vsi %d\n",
+                        vsi_seid);
+               return;
+       }
+       if (is_rx_ring)
+               ring = *vsi->rx_rings[ring_id];
+       else
+               ring = *vsi->tx_rings[ring_id];
+       if (cnt == 2) {
+               void *head = (struct i40e_tx_desc *)ring.desc + ring.count;
+               u32 tx_head = le32_to_cpu(*(volatile __le32 *)head);
+
+               dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
+                        vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
+               dev_info(&pf->pdev->dev, "head = %04x tail = %04x\n",
+                        is_rx_ring ? 0 : tx_head, readl(ring.tail));
+               dev_info(&pf->pdev->dev, "ntc = %04x ntu = %04x\n",
+                        ring.next_to_clean, ring.next_to_use);
+               for (i = 0; i < ring.count; i++) {
+                       if (!is_rx_ring) {
+                               txd = I40E_TX_DESC(&ring, i);
+                               dev_info(&pf->pdev->dev,
+                                        "   d[%03i] = 0x%016llx 0x%016llx\n",
+                                        i, txd->buffer_addr,
+                                        txd->cmd_type_offset_bsz);
+                       } else if (sizeof(union i40e_rx_desc) ==
+                                  sizeof(union i40e_16byte_rx_desc)) {
+                               rxd = I40E_RX_DESC(&ring, i);
+                               dev_info(&pf->pdev->dev,
+                                        "   d[%03i] = 0x%016llx 0x%016llx\n",
+                                        i, rxd->read.pkt_addr,
+                                        rxd->read.hdr_addr);
+                       } else {
+                               rxd = I40E_RX_DESC(&ring, i);
+                               dev_info(&pf->pdev->dev,
+                                        "   d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+                                        i, rxd->read.pkt_addr,
+                                        rxd->read.hdr_addr,
+                                        rxd->read.rsvd1, rxd->read.rsvd2);
+                       }
+               }
+       } else if (cnt == 3) {
+               if (desc_n >= ring.count || desc_n < 0) {
+                       dev_info(&pf->pdev->dev,
+                                "descriptor %d not found\n", desc_n);
+                       return;
+               }
+               if (!is_rx_ring) {
+                       txd = I40E_TX_DESC(&ring, desc_n);
+                       dev_info(&pf->pdev->dev,
+                                "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+                                vsi_seid, ring_id, desc_n,
+                                txd->buffer_addr, txd->cmd_type_offset_bsz);
+               } else if (sizeof(union i40e_rx_desc) ==
+                          sizeof(union i40e_16byte_rx_desc)) {
+                       rxd = I40E_RX_DESC(&ring, desc_n);
+                       dev_info(&pf->pdev->dev,
+                                "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+                                vsi_seid, ring_id, desc_n,
+                                rxd->read.pkt_addr, rxd->read.hdr_addr);
+               } else {
+                       rxd = I40E_RX_DESC(&ring, desc_n);
+                       dev_info(&pf->pdev->dev,
+                                "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+                                vsi_seid, ring_id, desc_n,
+                                rxd->read.pkt_addr, rxd->read.hdr_addr,
+                                rxd->read.rsvd1, rxd->read.rsvd2);
+               }
+       } else {
+               dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
+       }
+}
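The usage string above is issued by writing to the driver's command entry. A user-space sketch; the entry name and path are assumptions, and the seid value is illustrative (the "dump vsi" handler just below prints the valid seids):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* print rx descriptor 5 of ring 0 on the VSI with seid 528;
         * the output lands in the kernel log via dev_info() */
        const char *cmd = "dump desc rx 528 0 5";
        int fd = open("/sys/kernel/debug/i40e/0000:01:00.0/command", O_WRONLY);

        if (fd < 0)
                return 1;
        write(fd, cmd, strlen(cmd));
        close(fd);
        return 0;
}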
+
+/**
+ * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
+{
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vsi; i++)
+               if (pf->vsi[i])
+                       dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
+                                i, pf->vsi[i]->seid);
+}
+
+/**
+ * i40e_dbg_dump_capabilities - handles dump capabilities request
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_capabilities(struct i40e_pf *pf)
+{
+       struct i40e_hw_capabilities *p;
+
+       p = (struct i40e_hw_capabilities *)&pf->hw.func_caps;
+       dev_info(&pf->pdev->dev, "  capabilities:\n");
+       dev_info(&pf->pdev->dev,
+                "    switch_mode = %d\tmgmt_mode = %d\tnpar = %d\tos2bmc = %d\n",
+                p->switch_mode, p->management_mode, p->npar_enable, p->os2bmc);
+       dev_info(&pf->pdev->dev,
+                "    valid_functions = 0x%04x\tsr_iov_1_1 = %d\tnum_vfs = %d\tvf_base_id = %d\n",
+                p->valid_functions, p->sr_iov_1_1, p->num_vfs, p->vf_base_id);
+       dev_info(&pf->pdev->dev, "    nvm_image_type = %d\n", p->nvm_image_type);
+       dev_info(&pf->pdev->dev,
+                "    num_vsis = %d\tvmdq = %d\tflex10_enable = %d\tflex10_capable = %d\n",
+                p->num_vsis, p->vmdq, p->flex10_enable, p->flex10_capable);
+       dev_info(&pf->pdev->dev,
+                "    evb_802_1_qbg = %d\tevb_802_1_qbh = %d\tmgmt_cem = %d\tieee_1588 = %d\n",
+                p->evb_802_1_qbg, p->evb_802_1_qbh, p->mgmt_cem, p->ieee_1588);
+       dev_info(&pf->pdev->dev,
+                "    fcoe = %d\tiwarp = %d\tmdio_port_num = %d\tmdio_port_mode = %d\n",
+                p->fcoe, p->iwarp, p->mdio_port_num, p->mdio_port_mode);
+       dev_info(&pf->pdev->dev,
+                "    dcb = %d\tenabled_tcmap = %d\tmaxtc = %d\tiscsi = %d\n",
+                p->dcb, p->enabled_tcmap, p->maxtc, p->iscsi);
+       dev_info(&pf->pdev->dev,
+                "    fd = %d\tfd_filters_guaranteed = %d\tfd_filters_best_effort = %d\tnum_flow_director_filters = %d\n",
+                p->fd, p->fd_filters_guaranteed, p->fd_filters_best_effort,
+                p->num_flow_director_filters);
+       dev_info(&pf->pdev->dev,
+                "    rss = %d\trss_table_size = %d\trss_table_entry_width = %d\n",
+                p->rss, p->rss_table_size, p->rss_table_entry_width);
+       dev_info(&pf->pdev->dev,
+                "    led[0] = %d\tsdp[0] = %d\tled_pin_num = %d\tsdp_pin_num = %d\n",
+                p->led[0], p->sdp[0], p->led_pin_num, p->sdp_pin_num);
+       dev_info(&pf->pdev->dev,
+                "    num_rx_qp = %d\tnum_tx_qp = %d\tbase_queue = %d\n",
+                p->num_rx_qp, p->num_tx_qp, p->base_queue);
+       dev_info(&pf->pdev->dev,
+                "    num_msix_vectors = %d\tnum_msix_vectors_vf = %d\trx_buf_chain_len = %d\n",
+                p->num_msix_vectors, p->num_msix_vectors_vf,
+                p->rx_buf_chain_len);
+}
+
+/**
+ * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
+ * @pf: the i40e_pf created in command write
+ * @estats: the eth stats structure to be dumped
+ **/
+static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
+                                   struct i40e_eth_stats *estats)
+{
+       dev_info(&pf->pdev->dev, "  ethstats:\n");
+       dev_info(&pf->pdev->dev,
+                "    rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
+               estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
+       dev_info(&pf->pdev->dev,
+                "    rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
+                estats->rx_broadcast, estats->rx_discards);
+       dev_info(&pf->pdev->dev,
+                "    rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+                estats->rx_unknown_protocol, estats->tx_bytes);
+       dev_info(&pf->pdev->dev,
+                "    tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
+                estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
+       dev_info(&pf->pdev->dev,
+                "    tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
+                estats->tx_discards, estats->tx_errors);
+}
+
+/**
+ * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
+{
+       struct i40e_veb *veb;
+       int i;
+
+       if ((seid < I40E_BASE_VEB_SEID) ||
+           (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) {
+               dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+               return;
+       }
+
+       veb = i40e_dbg_find_veb(pf, seid);
+       if (!veb) {
+               dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
+               return;
+       }
+#ifdef HAVE_BRIDGE_ATTRIBS
+       dev_info(&pf->pdev->dev,
+                "veb idx=%d,%d stats_idx=%d seid=%d uplink=%d mode=%s\n",
+                veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
+                veb->uplink_seid,
+                veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+#else
+       dev_info(&pf->pdev->dev,
+                "veb idx=%d,%d stats_idx=%d seid=%d uplink=%d mode=%s\n",
+                veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
+                veb->uplink_seid,
+               "VEB");
+#endif
+       dev_info(&pf->pdev->dev,
+                "veb bw: enabled_tc=0x%x bw_limit=%d bw_max_quanta=%d is_abs_credits=%d\n",
+                veb->enabled_tc, veb->bw_limit, veb->bw_max_quanta,
+                veb->is_abs_credits);
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev, "veb bw: tc=%d bw_share=%d bw_limit=%d max_quanta=%d\n",
+                        i, veb->bw_tc_share_credits[i],
+                        veb->bw_tc_limit_credits[i], veb->bw_tc_max_quanta[i]);
+       }
+       i40e_dbg_dump_eth_stats(pf, &veb->stats);
+}
+
+/**
+ * i40e_dbg_dump_veb_all - dumps stats for all known VEBs
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
+{
+       struct i40e_veb *veb;
+       int i;
+
+       for (i = 0; i < I40E_MAX_VEB; i++) {
+               veb = pf->veb[i];
+               if (veb)
+                       i40e_dbg_dump_veb_seid(pf, veb->seid);
+       }
+}
+
+/**
+ * i40e_dbg_dump_vf - dump VF info
+ * @pf: the i40e_pf created in command write
+ * @vf_id: the vf_id from the user
+ **/
+static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
+{
+       struct i40e_vf *vf;
+       struct i40e_vsi *vsi;
+
+       if (!pf->num_alloc_vfs) {
+               dev_info(&pf->pdev->dev, "no VFs allocated\n");
+       } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
+               vf = &pf->vf[vf_id];
+               vsi = pf->vsi[vf->lan_vsi_idx];
+               dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
+                        vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
+               dev_info(&pf->pdev->dev, "       num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
+                        vf->num_mdd_events,
+                        vf->num_invalid_msgs,
+                        vf->num_valid_msgs);
+       } else {
+               dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
+       }
+}
+
+/**
+ * i40e_dbg_dump_vf_all - dump VF info for all VFs
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
+{
+       int i;
+
+       if (!pf->num_alloc_vfs)
+               dev_info(&pf->pdev->dev, "no VFs enabled!\n");
+       else
+               for (i = 0; i < pf->num_alloc_vfs; i++)
+                       i40e_dbg_dump_vf(pf, i);
+}
+
+/**
+ * i40e_dbg_dump_dcb_cfg - Dump DCB config data struct
+ * @pf: the corresponding PF
+ * @cfg: DCB Config data structure
+ * @prefix: Prefix string
+ **/
+static void i40e_dbg_dump_dcb_cfg(struct i40e_pf *pf,
+                                 struct i40e_dcbx_config *cfg,
+                                 char *prefix)
+{
+       int i;
+
+       dev_info(&pf->pdev->dev,
+                "%s ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+                prefix, cfg->etscfg.willing, cfg->etscfg.cbs,
+                cfg->etscfg.maxtcs);
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev, "%s ets_cfg: up=%d tc=%d\n",
+                        prefix, i, cfg->etscfg.prioritytable[i]);
+       }
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev, "%s ets_cfg: tc=%d tcbw=%d tctsa=%d\n",
+                        prefix, i, cfg->etscfg.tcbwtable[i],
+                        cfg->etscfg.tsatable[i]);
+       }
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev, "%s ets_rec: up=%d tc=%d\n",
+                        prefix, i, cfg->etsrec.prioritytable[i]);
+       }
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               dev_info(&pf->pdev->dev, "%s ets_rec: tc=%d tcbw=%d tctsa=%d\n",
+                        prefix, i, cfg->etsrec.tcbwtable[i],
+                        cfg->etsrec.tsatable[i]);
+       }
+       dev_info(&pf->pdev->dev,
+                "%s pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+                prefix, cfg->pfc.willing, cfg->pfc.mbc,
+                cfg->pfc.pfccap, cfg->pfc.pfcenable);
+
+       dev_info(&pf->pdev->dev,
+                "%s app_table: num_apps=%d\n", prefix, cfg->numapps);
+       for (i = 0; i < cfg->numapps; i++) {
+               dev_info(&pf->pdev->dev, "%s app_table: %d prio=%d selector=%d protocol=0x%x\n",
+                        prefix, i, cfg->app[i].priority,
+                        cfg->app[i].selector,
+                        cfg->app[i].protocolid);
+       }
+}
+#define I40E_MAX_DEBUG_OUT_BUFFER (4096 * 4)
+
+/**
+ * i40e_dbg_command_write - write into command datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_write(struct file *filp,
+                                     const char __user *buffer,
+                                     size_t count, loff_t *ppos)
+{
+       struct i40e_pf *pf = filp->private_data;
+       char *cmd_buf, *cmd_buf_tmp;
+       int bytes_not_copied;
+       struct i40e_vsi *vsi;
+       int vsi_seid;
+       int veb_seid;
+       int vf_id;
+       int cnt;
+
+       /* don't allow partial writes */
+       if (*ppos != 0)
+               return 0;
+
+       cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+       if (!cmd_buf)
+               return count;
+       bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+       if (bytes_not_copied) {
+               /* copy_from_user() returns the number of bytes left
+                * uncopied (never a negative value), so fail the write
+                * rather than silently truncating the command
+                */
+               kfree(cmd_buf);
+               return -EFAULT;
+       }
+       cmd_buf[count] = '\0';
+
+       cmd_buf_tmp = strchr(cmd_buf, '\n');
+       if (cmd_buf_tmp) {
+               *cmd_buf_tmp = '\0';
+               count = cmd_buf_tmp - cmd_buf + 1;
+       }
+
+       if (strncmp(cmd_buf, "read", 4) == 0) {
+               u32 address;
+               u32 value;
+
+               cnt = sscanf(&cmd_buf[4], "%i", &address);
+               if (cnt != 1) {
+                       dev_info(&pf->pdev->dev, "read <reg>\n");
+                       goto command_write_done;
+               }
+
+               /* check the range on address */
+               if (address > (pf->ioremap_len - sizeof(u32))) {
+                       dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
+                                address, (pf->ioremap_len - sizeof(u32)));
+                       goto command_write_done;
+               }
+
+               value = rd32(&pf->hw, address);
+               dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
+                        address, value);
+
+       } else if (strncmp(cmd_buf, "write", 5) == 0) {
+               u32 address, value;
+
+               cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev, "write <reg> <value>\n");
+                       goto command_write_done;
+               }
+
+               /* check the range on address */
+               if (address > (pf->ioremap_len - sizeof(u32))) {
+                       dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
+                                address, (pf->ioremap_len - sizeof(u32)));
+                       goto command_write_done;
+               }
+               wr32(&pf->hw, address, value);
+               value = rd32(&pf->hw, address);
+               dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
+                        address, value);
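+
+               /* Usage sketch (the register offset is an arbitrary example;
+                * the command file lives in the driver's per-device debugfs
+                * directory):
+                *   echo "read 0x2000" > command
+                *   echo "write 0x2000 0xdeadbeef" > command
+                * "%i" lets sscanf() accept decimal, octal (0-prefixed) and
+                * hex (0x-prefixed) offsets alike.
+                */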
+
+       } else if (strncmp(cmd_buf, "add vsi", 7) == 0) {
+               vsi_seid = -1;
+               cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+               if (cnt == 0) {
+                       /* default to PF VSI */
+                       vsi_seid = pf->vsi[pf->lan_vsi]->seid;
+               } else if (vsi_seid < 0) {
+                       dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
+                                vsi_seid);
+                       goto command_write_done;
+               }
+
+               /* By default we are in VEPA mode; if this is the first VF/VMDq
+                * VSI to be added, switch to VEB mode.
+                */
+               if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset_safe(pf,
+                                          BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               }
+
+               vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
+               if (vsi)
+                       dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
+                                vsi->seid, vsi->uplink_seid);
+               else
+                       dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
+
+       } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
+               cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+               if (cnt != 1) {
+                       dev_info(&pf->pdev->dev,
+                                "del vsi: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               }
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
+                                vsi_seid);
+                       goto command_write_done;
+               }
+
+               dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
+               i40e_vsi_release(vsi);
+
+       } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
+               struct i40e_veb *veb;
+               int uplink_seid, i;
+
+               cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev,
+                                "add relay: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               } else if (uplink_seid < 0) {
+                       dev_info(&pf->pdev->dev,
+                                "add relay %d: bad uplink seid\n",
+                                uplink_seid);
+                       goto command_write_done;
+               }
+
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev,
+                                "add relay: VSI %d not found\n", vsi_seid);
+                       goto command_write_done;
+               }
+
+               for (i = 0; i < I40E_MAX_VEB; i++)
+                       if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
+                               break;
+               if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
+                   uplink_seid != pf->mac_seid) {
+                       dev_info(&pf->pdev->dev,
+                                "add relay: relay uplink %d not found\n",
+                                uplink_seid);
+                       goto command_write_done;
+               }
+
+               veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
+                                    vsi->tc_config.enabled_tc);
+               if (veb)
+                       dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
+               else
+                       dev_info(&pf->pdev->dev, "add relay failed\n");
+
+       } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+               int i;
+
+               cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
+               if (cnt != 1) {
+                       dev_info(&pf->pdev->dev,
+                                "del relay: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               } else if (veb_seid < 0) {
+                       dev_info(&pf->pdev->dev,
+                                "del relay %d: bad relay seid\n", veb_seid);
+                       goto command_write_done;
+               }
+
+               /* find the veb */
+               for (i = 0; i < I40E_MAX_VEB; i++)
+                       if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+                               break;
+               if (i >= I40E_MAX_VEB) {
+                       dev_info(&pf->pdev->dev,
+                                "del relay: relay %d not found\n", veb_seid);
+                       goto command_write_done;
+               }
+
+               dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
+               i40e_veb_release(pf->veb[i]);
+
+       } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
+               struct i40e_mac_filter *f;
+               int vlan = 0;
+               u8 ma[6];
+               int ret;
+
+               cnt = sscanf(&cmd_buf[11],
+                            "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
+                            &vsi_seid,
+                            &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
+                            &vlan);
+               if (cnt == 7) {
+                       vlan = 0;
+               } else if (cnt != 8) {
+                       dev_info(&pf->pdev->dev,
+                                "add macaddr: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               }
+
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev,
+                                "add macaddr: VSI %d not found\n", vsi_seid);
+                       goto command_write_done;
+               }
+
+               spin_lock_bh(&vsi->mac_filter_list_lock);
+               f = i40e_add_filter(vsi, ma, vlan, false, false);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+               ret = i40e_sync_vsi_filters(vsi, true);
+               if (f && !ret)
+                       dev_info(&pf->pdev->dev,
+                                "add macaddr: %pM vlan=%d added to VSI %d\n",
+                                ma, vlan, vsi_seid);
+               else
+                       dev_info(&pf->pdev->dev,
+                                "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
+                                ma, vlan, vsi_seid, f, ret);
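+
+               /* Illustrative invocation (the seid and address are arbitrary
+                * examples):
+                *   echo "add macaddr 400 00:11:22:33:44:55 8" > command
+                * The trailing VLAN is optional and defaults to 0, per the
+                * cnt == 7 case above.
+                */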
+
+       } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
+               int vlan = 0;
+               u8 ma[6];
+               int ret;
+
+               cnt = sscanf(&cmd_buf[11],
+                            "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
+                            &vsi_seid,
+                            &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
+                            &vlan);
+               if (cnt == 7) {
+                       vlan = 0;
+               } else if (cnt != 8) {
+                       dev_info(&pf->pdev->dev,
+                                "del macaddr: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               }
+
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev,
+                                "del macaddr: VSI %d not found\n", vsi_seid);
+                       goto command_write_done;
+               }
+
+               spin_lock_bh(&vsi->mac_filter_list_lock);
+               i40e_del_filter(vsi, ma, vlan, false, false);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+               ret = i40e_sync_vsi_filters(vsi, true);
+               if (!ret)
+                       dev_info(&pf->pdev->dev,
+                                "del macaddr: %pM vlan=%d removed from VSI %d\n",
+                                ma, vlan, vsi_seid);
+               else
+                       dev_info(&pf->pdev->dev,
+                                "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
+                                ma, vlan, vsi_seid, ret);
+
+       } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
+               i40e_status ret;
+               u16 vid;
+               unsigned int v;
+
+               cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev,
+                                "add pvid: bad command string, cnt=%d\n", cnt);
+                       goto command_write_done;
+               }
+
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
+                                vsi_seid);
+                       goto command_write_done;
+               }
+
+               vid = (u16)v;
+               ret = i40e_vsi_add_pvid(vsi, vid);
+               if (!ret)
+                       dev_info(&pf->pdev->dev,
+                                "add pvid: %d added to VSI %d\n",
+                                vid, vsi_seid);
+               else
+                       dev_info(&pf->pdev->dev,
+                                "add pvid: %d to VSI %d failed, ret=%d\n",
+                                vid, vsi_seid, ret);
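+
+               /* e.g. "echo add pvid 400 100 > command" sets port VLAN 100
+                * on VSI 400 (arbitrary example seid); "del pvid <vsi_seid>"
+                * below removes it again.
+                */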
+
+       } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
+
+               cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+               if (cnt != 1) {
+                       dev_info(&pf->pdev->dev,
+                                "del pvid: bad command string, cnt=%d\n",
+                                cnt);
+                       goto command_write_done;
+               }
+
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev,
+                                "del pvid: VSI %d not found\n", vsi_seid);
+                       goto command_write_done;
+               }
+
+               i40e_vsi_remove_pvid(vsi);
+               dev_info(&pf->pdev->dev,
+                        "del pvid: removed from VSI %d\n", vsi_seid);
+
+       } else if (strncmp(cmd_buf, "dump", 4) == 0) {
+               if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
+                       i40e_fetch_switch_configuration(pf, true);
+               } else if (strncmp(&cmd_buf[5], "capabilities", 7) == 0) {
+                       i40e_dbg_dump_capabilities(pf);
+               } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
+                       cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+                       if (cnt > 0)
+                               i40e_dbg_dump_vsi_seid(pf, vsi_seid);
+                       else
+                               i40e_dbg_dump_vsi_no_seid(pf);
+               } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
+                       cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+                       if (cnt > 0)
+                               i40e_dbg_dump_veb_seid(pf, vsi_seid);
+                       else
+                               i40e_dbg_dump_veb_all(pf);
+               } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
+                       cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
+                       if (cnt > 0)
+                               i40e_dbg_dump_vf(pf, vf_id);
+                       else
+                               i40e_dbg_dump_vf_all(pf);
+               } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
+                       int ring_id, desc_n;
+
+                       if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
+                               cnt = sscanf(&cmd_buf[12], "%i %i %i",
+                                            &vsi_seid, &ring_id, &desc_n);
+                               i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+                                                  desc_n, pf, true);
+                       } else if (strncmp(&cmd_buf[10], "tx", 2)
+                                       == 0) {
+                               cnt = sscanf(&cmd_buf[12], "%i %i %i",
+                                            &vsi_seid, &ring_id, &desc_n);
+                               i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+                                                  desc_n, pf, false);
+                       } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
+                               i40e_dbg_dump_aq_desc(pf);
+                       } else {
+                               dev_info(&pf->pdev->dev,
+                                        "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+                               dev_info(&pf->pdev->dev,
+                                        "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+                               dev_info(&pf->pdev->dev, "dump desc aq\n");
+                       }
+               } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
+                       dev_info(&pf->pdev->dev,
+                                "core reset count: %d\n", pf->corer_count);
+                       dev_info(&pf->pdev->dev,
+                                "global reset count: %d\n", pf->globr_count);
+                       dev_info(&pf->pdev->dev,
+                                "emp reset count: %d\n", pf->empr_count);
+                       dev_info(&pf->pdev->dev,
+                                "pf reset count: %d\n", pf->pfr_count);
+                       dev_info(&pf->pdev->dev,
+                                "pf tx sluggish count: %d\n",
+                                pf->tx_sluggish_count);
+               } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
+                       struct i40e_aqc_query_port_ets_config_resp *bw_data;
+                       struct i40e_dcbx_config *cfg =
+                                               &pf->hw.local_dcbx_config;
+                       struct i40e_dcbx_config *r_cfg =
+                                               &pf->hw.remote_dcbx_config;
+                       struct i40e_dcbx_config *d_cfg =
+                                               &pf->hw.desired_dcbx_config;
+                       int i, ret;
+
+                       bw_data = kzalloc(sizeof(*bw_data), GFP_KERNEL);
+                       if (!bw_data) {
+                               ret = -ENOMEM;
+                               goto command_write_done;
+                       }
+
+                       ret = i40e_aq_query_port_ets_config(&pf->hw,
+                                                           pf->mac_seid,
+                                                           bw_data, NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "Query Port ETS Config AQ command failed =0x%x\n",
+                                        pf->hw.aq.asq_last_status);
+                               kfree(bw_data);
+                               bw_data = NULL;
+                               goto command_write_done;
+                       }
+                       dev_info(&pf->pdev->dev,
+                                "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
+                                bw_data->tc_valid_bits,
+                                bw_data->tc_strict_priority_bits,
+                                le16_to_cpu(bw_data->tc_bw_max[0]),
+                                le16_to_cpu(bw_data->tc_bw_max[1]));
+                       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+                               dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
+                                        bw_data->tc_bw_share_credits[i],
+                                        le16_to_cpu(bw_data->tc_bw_limits[i]));
+                       }
+
+                       kfree(bw_data);
+                       bw_data = NULL;
+
+                       if (cfg->dcbx_mode == I40E_DCBX_MODE_CEE) {
+                               dev_info(&pf->pdev->dev,
+                                        "CEE DCBX mode with Oper TLV Status = 0x%x\n",
+                                        cfg->tlv_status);
+                               i40e_dbg_dump_dcb_cfg(pf, d_cfg, "DesiredCfg");
+                       } else {
+                               dev_info(&pf->pdev->dev, "IEEE DCBX mode\n");
+                       }
+
+                       i40e_dbg_dump_dcb_cfg(pf, cfg, "OperCfg");
+                       i40e_dbg_dump_dcb_cfg(pf, r_cfg, "PeerCfg");
+
+               } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
+                       int cluster_id, table_id;
+                       int index, ret;
+                       u16 buff_len = 4096;
+                       u32 next_index;
+                       u8 next_table;
+                       u8 *buff;
+                       u16 rlen;
+
+                       cnt = sscanf(&cmd_buf[18], "%i %i %i",
+                                    &cluster_id, &table_id, &index);
+                       if (cnt != 3) {
+                               dev_info(&pf->pdev->dev,
+                                        "dump debug fwdata <cluster_id> <table_id> <index>\n");
+                               goto command_write_done;
+                       }
+
+                       dev_info(&pf->pdev->dev,
+                                "AQ debug dump fwdata params %x %x %x %x\n",
+                                cluster_id, table_id, index, buff_len);
+                       buff = kzalloc(buff_len, GFP_KERNEL);
+                       if (!buff)
+                               goto command_write_done;
+
+                       ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
+                                                index, buff_len, buff, &rlen,
+                                                &next_table, &next_index,
+                                                NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "debug dump fwdata AQ Failed %d 0x%x\n",
+                                        ret, pf->hw.aq.asq_last_status);
+                               kfree(buff);
+                               buff = NULL;
+                               goto command_write_done;
+                       }
+                       dev_info(&pf->pdev->dev,
+                                "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
+                                rlen, next_table, next_index);
+                       print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+                                      DUMP_PREFIX_OFFSET, 16, 1,
+                                      buff, rlen, true);
+                       kfree(buff);
+                       buff = NULL;
+               } else {
+                       dev_info(&pf->pdev->dev,
+                                "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
+                       dev_info(&pf->pdev->dev, "dump switch\n");
+                       dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
+                       dev_info(&pf->pdev->dev, "dump capabilities\n");
+                       dev_info(&pf->pdev->dev, "dump reset stats\n");
+                       dev_info(&pf->pdev->dev, "dump port\n");
+                       dev_info(&pf->pdev->dev, "dump VF [vf_id]\n");
+                       dev_info(&pf->pdev->dev,
+                                "dump debug fwdata <cluster_id> <table_id> <index>\n");
+               }
+
+       } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
+               u32 level;
+
+               cnt = sscanf(&cmd_buf[10], "%i", &level);
+               if (cnt) {
+                       if (I40E_DEBUG_USER & level) {
+                               pf->hw.debug_mask = level;
+                               dev_info(&pf->pdev->dev,
+                                        "set hw.debug_mask = 0x%08x\n",
+                                        pf->hw.debug_mask);
+                       }
+                       pf->msg_enable = level;
+                       dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n",
+                                pf->msg_enable);
+               } else {
+                       dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n",
+                                pf->msg_enable);
+               }
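+               /* The level argument is a bitmask rather than a verbosity
+                * number: only values with an I40E_DEBUG_USER bit set also
+                * update hw.debug_mask, and a bare "msg_enable" just prints
+                * the current mask, e.g. (illustrative):
+                *   echo "msg_enable 0xffffffff" > command
+                */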
+       } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
+               dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
+               i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
+
+       } else if (strncmp(cmd_buf, "corer", 5) == 0) {
+               dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
+               i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
+
+       } else if (strncmp(cmd_buf, "globr", 5) == 0) {
+               dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
+               i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
+       } else if (strncmp(cmd_buf, "defport on", 10) == 0) {
+               dev_info(&pf->pdev->dev, "debugfs: forcing PFR with defport enabled\n");
+               pf->cur_promisc = true;
+               i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
+       } else if (strncmp(cmd_buf, "defport off", 11) == 0) {
+               dev_info(&pf->pdev->dev, "debugfs: forcing PFR with defport disabled\n");
+               pf->cur_promisc = false;
+               i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
+
+       } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
+               if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
+                       cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
+                       if (cnt == 0) {
+                               int i;
+
+                               for (i = 0; i < pf->num_alloc_vsi; i++)
+                                       i40e_vsi_reset_stats(pf->vsi[i]);
+                               dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
+                       } else if (cnt == 1) {
+                               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+                               if (!vsi) {
+                                       dev_info(&pf->pdev->dev,
+                                                "clear_stats vsi: bad vsi %d\n",
+                                                vsi_seid);
+                                       goto command_write_done;
+                               }
+                               i40e_vsi_reset_stats(vsi);
+                               dev_info(&pf->pdev->dev,
+                                        "vsi clear stats called for vsi %d\n",
+                                        vsi_seid);
+                       } else {
+                               dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
+                       }
+               } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
+                       if (pf->hw.partition_id == 1) {
+                               i40e_pf_reset_stats(pf);
+                               dev_info(&pf->pdev->dev, "port stats cleared\n");
+                       } else {
+                               dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
+                       }
+               } else {
+                       dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
+               }
+       } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
+               struct i40e_aq_desc *desc;
+               i40e_status ret;
+
+               desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+               if (!desc)
+                       goto command_write_done;
+               cnt = sscanf(&cmd_buf[11],
+                            "%hi %hi %hi %hi %i %i %i %i %i %i",
+                            &desc->flags,
+                            &desc->opcode, &desc->datalen, &desc->retval,
+                            &desc->cookie_high, &desc->cookie_low,
+                            &desc->params.internal.param0,
+                            &desc->params.internal.param1,
+                            &desc->params.internal.param2,
+                            &desc->params.internal.param3);
+               if (cnt != 10) {
+                       dev_info(&pf->pdev->dev,
+                                "send aq_cmd: bad command string, cnt=%d\n",
+                                cnt);
+                       kfree(desc);
+                       desc = NULL;
+                       goto command_write_done;
+               }
+               ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
+               if (!ret) {
+                       dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+               } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+                       dev_info(&pf->pdev->dev,
+                                "AQ command send failed Opcode %x AQ Error: %d\n",
+                                desc->opcode, pf->hw.aq.asq_last_status);
+               } else {
+                       dev_info(&pf->pdev->dev,
+                                "AQ command send failed Opcode %x Status: %d\n",
+                                desc->opcode, ret);
+               }
+               dev_info(&pf->pdev->dev,
+                        "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                        desc->flags, desc->opcode, desc->datalen, desc->retval,
+                        desc->cookie_high, desc->cookie_low,
+                        desc->params.internal.param0,
+                        desc->params.internal.param1,
+                        desc->params.internal.param2,
+                        desc->params.internal.param3);
+               kfree(desc);
+               desc = NULL;
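+
+               /* Illustrative example, assuming opcode 0x0001 is the admin
+                * queue "get version" command in i40e_adminq_cmd.h:
+                *   echo "send aq_cmd 0x0 0x0001 0 0 0 0 0 0 0 0" > command
+                * The ten fields map to flags, opcode, datalen, retval, the
+                * two cookie words and params 0-3, in the sscanf() order
+                * above; the descriptor writeback is printed afterwards.
+                */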
+       } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
+               struct i40e_aq_desc *desc;
+               i40e_status ret;
+               u16 buffer_len;
+               u8 *buff;
+
+               desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+               if (!desc)
+                       goto command_write_done;
+               cnt = sscanf(&cmd_buf[20],
+                            "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
+                            &desc->flags,
+                            &desc->opcode, &desc->datalen, &desc->retval,
+                            &desc->cookie_high, &desc->cookie_low,
+                            &desc->params.internal.param0,
+                            &desc->params.internal.param1,
+                            &desc->params.internal.param2,
+                            &desc->params.internal.param3,
+                            &buffer_len);
+               if (cnt != 11) {
+                       dev_info(&pf->pdev->dev,
+                                "send indirect aq_cmd: bad command string, cnt=%d\n",
+                                cnt);
+                       kfree(desc);
+                       desc = NULL;
+                       goto command_write_done;
+               }
+               /* Just stub a buffer big enough in case user messed up */
+               if (buffer_len == 0)
+                       buffer_len = 1280;
+
+               buff = kzalloc(buffer_len, GFP_KERNEL);
+               if (!buff) {
+                       kfree(desc);
+                       desc = NULL;
+                       goto command_write_done;
+               }
+               desc->flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+               ret = i40e_asq_send_command(&pf->hw, desc, buff,
+                                           buffer_len, NULL);
+               if (!ret) {
+                       dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+               } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+                       dev_info(&pf->pdev->dev,
+                                "AQ command send failed Opcode %x AQ Error: %d\n",
+                                desc->opcode, pf->hw.aq.asq_last_status);
+               } else {
+                       dev_info(&pf->pdev->dev,
+                                "AQ command send failed Opcode %x Status: %d\n",
+                                desc->opcode, ret);
+               }
+               dev_info(&pf->pdev->dev,
+                        "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                        desc->flags, desc->opcode, desc->datalen, desc->retval,
+                        desc->cookie_high, desc->cookie_low,
+                        desc->params.internal.param0,
+                        desc->params.internal.param1,
+                        desc->params.internal.param2,
+                        desc->params.internal.param3);
+               print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+                              DUMP_PREFIX_OFFSET, 16, 1,
+                              buff, buffer_len, true);
+               kfree(buff);
+               buff = NULL;
+               kfree(desc);
+               desc = NULL;
+       } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
+                  (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
+               struct i40e_fdir_filter fd_data;
+               u16 packet_len, i, j = 0;
+               char *asc_packet;
+               u8 *raw_packet;
+               bool add = false;
+               int ret;
+
+               if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+                       goto command_write_done;
+
+               if (strncmp(cmd_buf, "add", 3) == 0)
+                       add = true;
+
+               if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+                       goto command_write_done;
+
+               asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
+                                    GFP_KERNEL);
+               if (!asc_packet)
+                       goto command_write_done;
+
+               raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
+                                    GFP_KERNEL);
+
+               if (!raw_packet) {
+                       kfree(asc_packet);
+                       asc_packet = NULL;
+                       goto command_write_done;
+               }
+
+               cnt = sscanf(&cmd_buf[13],
+                            "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
+                            &fd_data.q_index,
+                            &fd_data.flex_off, &fd_data.pctype,
+                            &fd_data.dest_vsi, &fd_data.dest_ctl,
+                            &fd_data.fd_status, &fd_data.cnt_index,
+                            &fd_data.fd_id, &packet_len, asc_packet);
+               if (cnt != 10) {
+                       dev_info(&pf->pdev->dev,
+                                "program fd_filter: bad command string, cnt=%d\n",
+                                cnt);
+                       kfree(asc_packet);
+                       asc_packet = NULL;
+                       kfree(raw_packet);
+                       goto command_write_done;
+               }
+
+               /* fix packet length if user entered 0 */
+               if (packet_len == 0)
+                       packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
+
+               /* make sure to check the max as well */
+               packet_len = min_t(u16,
+                                  packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
+               printk("packet in ascii %s\n", asc_packet);
+
+               for (i = 0; i < packet_len; i++) {
+                       cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
+                       if (!cnt)
+                               break;
+                       j += 3;
+               }
+               dev_info(&pf->pdev->dev, "FD raw packet dump\n");
+               print_hex_dump(KERN_INFO, "FD raw packet: ",
+                              DUMP_PREFIX_OFFSET, 16, 1,
+                              raw_packet, packet_len, true);
+               ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
+               if (!ret) {
+                       dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
+               } else {
+                       dev_info(&pf->pdev->dev,
+                                "Filter command send failed %d\n", ret);
+               }
+               kfree(raw_packet);
+               raw_packet = NULL;
+               kfree(asc_packet);
+               asc_packet = NULL;
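+
+               /* The ten sscanf() fields above are, in order: q_index,
+                * flex_off, pctype, dest_vsi, dest_ctl, fd_status, cnt_index,
+                * fd_id, packet_len, then the raw packet as space-separated
+                * hex bytes; a packet_len of 0 means "use the maximum".
+                */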
+       } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
+               dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
+                        i40e_get_current_fd_count(pf));
+       } else if ((strncmp(cmd_buf, "add ethtype filter", 18) == 0) ||
+                  (strncmp(cmd_buf, "rem ethtype filter", 18) == 0)) {
+               u16 ethtype;
+               u16 queue;
+               bool add = false;
+               int ret;
+
+               if (strncmp(cmd_buf, "add", 3) == 0)
+                       add = true;
+
+               cnt = sscanf(&cmd_buf[18],
+                            "%hi %hi",
+                            &ethtype, &queue);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev,
+                                "%s ethtype filter: bad command string, cnt=%d\n",
+                                add ? "add" : "rem",
+                                cnt);
+                       goto command_write_done;
+               }
+               ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+                                       pf->hw.mac.addr,
+                                       ethtype, 0,
+                                       pf->vsi[pf->lan_vsi]->seid,
+                                       queue, add, NULL, NULL);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                               "%s: add/rem Control Packet Filter AQ command failed =0x%x\n",
+                               add ? "add" : "rem",
+                               pf->hw.aq.asq_last_status);
+                       goto command_write_done;
+               }
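+               /* Illustrative: "echo 'add ethtype filter 0x88cc 0' > command"
+                * steers LLDP frames (ethertype 0x88cc) to queue 0 of the main
+                * LAN VSI; the matching "rem ethtype filter" undoes it.
+                */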
+
+       } else if (strncmp(cmd_buf, "dcb off", 7) == 0) {
+               u8 tc = i40e_pf_get_num_tc(pf);
+               /* Allow disabling only when in single TC mode */
+               if (tc > 1) {
+                       dev_info(&pf->pdev->dev, "Failed to disable DCB as TC count(%d) is greater than 1.\n",
+                                tc);
+                       goto command_write_done;
+               }
+               pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+       } else if (strncmp(cmd_buf, "dcb on", 6) == 0) {
+               pf->flags |= I40E_FLAG_DCB_ENABLED;
+       } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
+               if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
+                       int ret;
+
+                       ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "Stop LLDP AQ command failed =0x%x\n",
+                                        pf->hw.aq.asq_last_status);
+                               goto command_write_done;
+                       }
+                       ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+                                               pf->hw.mac.addr,
+                                               I40E_ETH_P_LLDP, 0,
+                                               pf->vsi[pf->lan_vsi]->seid,
+                                               0, true, NULL, NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                       "%s: Add Control Packet Filter AQ command failed =0x%x\n",
+                                       __func__, pf->hw.aq.asq_last_status);
+                               goto command_write_done;
+                       }
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_IEEE
+                       pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+                                      DCB_CAP_DCBX_VER_IEEE;
+#endif /* HAVE_DCBNL_IEEE */
+#endif /* CONFIG_DCB */
+               } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
+                       int ret;
+
+                       ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+                                               pf->hw.mac.addr,
+                                               I40E_ETH_P_LLDP, 0,
+                                               pf->vsi[pf->lan_vsi]->seid,
+                                               0, false, NULL, NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                       "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
+                                       __func__, pf->hw.aq.asq_last_status);
+                               /* Continue and start FW LLDP anyway */
+                       }
+
+                       ret = i40e_aq_start_lldp(&pf->hw, NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "Start LLDP AQ command failed =0x%x\n",
+                                        pf->hw.aq.asq_last_status);
+                               goto command_write_done;
+                       }
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_IEEE
+                       pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+                                      DCB_CAP_DCBX_VER_IEEE;
+#endif /* HAVE_DCBNL_IEEE */
+#endif /* CONFIG_DCB */
+               } else if (strncmp(&cmd_buf[5], "get local", 9) == 0) {
+                       u16 llen, rlen;
+                       int ret;
+                       u8 *buff;
+
+                       buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+                       if (!buff)
+                               goto command_write_done;
+
+                       ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
+                                                  I40E_AQ_LLDP_MIB_LOCAL,
+                                                  buff, I40E_LLDPDU_SIZE,
+                                                  &llen, &rlen, NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "Get LLDP MIB (local) AQ command failed =0x%x\n",
+                                        pf->hw.aq.asq_last_status);
+                               kfree(buff);
+                               buff = NULL;
+                               goto command_write_done;
+                       }
+                       dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
+                       print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
+                                      DUMP_PREFIX_OFFSET, 16, 1,
+                                      buff, I40E_LLDPDU_SIZE, true);
+                       kfree(buff);
+                       buff = NULL;
+               } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+                       u16 llen, rlen;
+                       int ret;
+                       u8 *buff;
+
+                       buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+                       if (!buff)
+                               goto command_write_done;
+
+                       ret = i40e_aq_get_lldp_mib(&pf->hw,
+                                       I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+                                       I40E_AQ_LLDP_MIB_REMOTE,
+                                       buff, I40E_LLDPDU_SIZE,
+                                       &llen, &rlen, NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "Get LLDP MIB (remote) AQ command failed =0x%x\n",
+                                        pf->hw.aq.asq_last_status);
+                               kfree(buff);
+                               buff = NULL;
+                               goto command_write_done;
+                       }
+                       dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
+                       print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
+                                      DUMP_PREFIX_OFFSET, 16, 1,
+                                      buff, I40E_LLDPDU_SIZE, true);
+                       kfree(buff);
+                       buff = NULL;
+               } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
+                       int ret;
+
+                       ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+                                                               true, NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
+                                        pf->hw.aq.asq_last_status);
+                               goto command_write_done;
+                       }
+               } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
+                       int ret;
+
+                       ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+                                                               false, NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
+                                        pf->hw.aq.asq_last_status);
+                               goto command_write_done;
+                       }
+               }
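+               /* Subcommands handled by the chain above: stop, start,
+                * get local, get remote, event on and event off; e.g.
+                * "echo lldp get remote > command" hexdumps the neighbor's
+                * LLDP MIB.
+                */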
+       } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
+               u16 buffer_len, bytes;
+               u16 module;
+               u32 offset;
+               u16 *buff;
+               int ret;
+
+               cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
+                            &module, &offset, &buffer_len);
+               /* sscanf() returns EOF on an empty argument list, so treat
+                * anything non-positive as "use the defaults"
+                */
+               if (cnt <= 0) {
+                       module = 0;
+                       offset = 0;
+                       buffer_len = 0;
+               } else if (cnt == 1) {
+                       offset = 0;
+                       buffer_len = 0;
+               } else if (cnt == 2) {
+                       buffer_len = 0;
+               } else if (cnt > 3) {
+                       dev_info(&pf->pdev->dev,
+                                "nvm read: bad command string, cnt=%d\n", cnt);
+                       goto command_write_done;
+               }
+
+               /* buffer_len is in 16-bit words; cap it so the byte count
+                * fits in one AQ buffer
+                */
+               buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE / 2);
+
+               bytes = 2 * buffer_len;
+
+               /* read at least 1kB, but no more than 4kB */
+               bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
+               buff = kzalloc(bytes, GFP_KERNEL);
+               if (!buff)
+                       goto command_write_done;
+
+               ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+                                ret, pf->hw.aq.asq_last_status);
+                       kfree(buff);
+                       goto command_write_done;
+               }
+
+               ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
+                                      bytes, (u8 *)buff, true, NULL);
+               i40e_release_nvm(&pf->hw);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Read NVM AQ failed err=%d status=0x%x\n",
+                                ret, pf->hw.aq.asq_last_status);
+               } else {
+                       dev_info(&pf->pdev->dev,
+                                "Read NVM module=0x%x offset=0x%x words=%d\n",
+                                module, offset, buffer_len);
+                       if (bytes)
+                               print_hex_dump(KERN_INFO, "NVM Dump: ",
+                                       DUMP_PREFIX_OFFSET, 16, 2,
+                                       buff, bytes, true);
+               }
+               kfree(buff);
+               buff = NULL;
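+
+               /* Illustrative: "echo 'nvm read 0x0 0x0 0x100' > command"
+                * dumps 0x100 NVM words from module 0, offset 0. Omitted
+                * trailing arguments default to 0, and the transfer size is
+                * clamped to the 1kB..4kB window above.
+                */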
+       } else if (strncmp(cmd_buf, "set rss_size", 12) == 0) {
+               int q_count;
+
+               cnt = sscanf(&cmd_buf[12], "%i", &q_count);
+               if (cnt != 1) {
+                       dev_info(&pf->pdev->dev,
+                                "set rss_size: bad command string, cnt=%d\n", cnt);
+                       goto command_write_done;
+               }
+               if (q_count <= 0) {
+                       dev_info(&pf->pdev->dev,
+                                "set rss_size: %d is too small\n",
+                                q_count);
+                       goto command_write_done;
+               }
+               dev_info(&pf->pdev->dev,
+                        "set rss_size requesting %d queues\n", q_count);
+               rtnl_lock();
+               i40e_reconfig_rss_queues(pf, q_count);
+               rtnl_unlock();
+               dev_info(&pf->pdev->dev, "new rss_size %d\n", pf->rss_size);
+       } else if (strncmp(cmd_buf, "get bw", 6) == 0) {
+               i40e_status status;
+               u32 max_bw, min_bw;
+               bool min_valid, max_valid;
+
+               status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+                                                  &min_valid, &max_valid);
+
+               if (status) {
+                       dev_info(&pf->pdev->dev, "get bw failed with status %d\n",
+                               status);
+                       goto command_write_done;
+               }
+               if (!min_valid) {
+                       dev_info(&pf->pdev->dev, "min bw invalid\n");
+               } else if (min_bw & I40E_ALT_BW_RELATIVE_MASK) {
+                       dev_info(&pf->pdev->dev, "relative min bw = %d%%\n",
+                               min_bw & I40E_ALT_BW_VALUE_MASK);
+               } else {
+                       dev_info(&pf->pdev->dev, "absolute min bw = %dMb/s\n",
+                               (min_bw & I40E_ALT_BW_VALUE_MASK)*128);
+               }
+               if (!max_valid) {
+                       dev_info(&pf->pdev->dev, "max bw invalid\n");
+               } else if (max_bw & I40E_ALT_BW_RELATIVE_MASK) {
+                       dev_info(&pf->pdev->dev, "relative max bw = %d%%\n",
+                               max_bw & I40E_ALT_BW_VALUE_MASK);
+               } else {
+                       dev_info(&pf->pdev->dev, "absolute max bw = %dMb/s\n",
+                               (max_bw & I40E_ALT_BW_VALUE_MASK)*128);
+               }
+       } else if (strncmp(cmd_buf, "set bw", 6) == 0) {
+               struct i40e_aqc_configure_partition_bw_data bw_data;
+               i40e_status status;
+               u32 max_bw, min_bw;
+
+               /* Set the valid bit for this PF */
+               bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
+
+               /* Get the bw's */
+               cnt = sscanf(&cmd_buf[7], "%d %d", &max_bw, &min_bw);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev,"set bw <MAX> <MIN>\n");
+                       goto command_write_done;
+               }
+               bw_data.max_bw[pf->hw.pf_id] = max_bw;
+               bw_data.min_bw[pf->hw.pf_id] = min_bw;
+
+               /* Set the new bandwidths */
+               status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
+               if (status) {
+                       dev_info(&pf->pdev->dev, "configure partition bw failed with status %d\n",
+                                status);
+                       goto command_write_done;
+               }
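+               /* Illustrative: "echo 'set bw 100 50' > command" requests
+                * max/min bandwidth values of 100 and 50 for this partition;
+                * "commit bw" below is what makes the values persist in NVM.
+                */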
+       } else if (strncmp(cmd_buf, "commit bw", 9) == 0) {
+               /* Commit temporary BW setting to permanent NVM image */
+               enum i40e_admin_queue_err last_aq_status;
+               i40e_status aq_status;
+               u16 nvm_word;
+
+               if (pf->hw.partition_id != 1) {
+                       dev_info(&pf->pdev->dev,
+                                "Commit BW only works on first partition!\n");
+                       goto command_write_done;
+               }
+
+               /* Acquire NVM for read access */
+               aq_status = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+               if (aq_status) {
+                       dev_info(&pf->pdev->dev,
+                                "Error %d: Cannot acquire NVM for Read Access\n",
+                                aq_status);
+                       goto command_write_done;
+               }
+
+               /* Read word 0x10 of NVM - SW compatibility word 1 */
+               aq_status = i40e_aq_read_nvm(&pf->hw,
+                                            I40E_SR_NVM_CONTROL_WORD,
+                                            0x10, sizeof(nvm_word), &nvm_word,
+                                            false, NULL);
+               /* Save off last admin queue command status before releasing
+                * the NVM
+                */
+               last_aq_status = pf->hw.aq.asq_last_status;
+               i40e_release_nvm(&pf->hw);
+               if (aq_status) {
+                       dev_info(&pf->pdev->dev, "NVM read error %d:%d\n",
+                                aq_status, last_aq_status);
+                       goto command_write_done;
+               }
+
+               /* Wait a bit for NVM release to complete */
+               msleep(100);
+
+               /* Acquire NVM for write access */
+               aq_status = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
+               if (aq_status) {
+                       dev_info(&pf->pdev->dev,
+                                "Error %d: Cannot acquire NVM for Write Access\n",
+                                aq_status);
+                       goto command_write_done;
+               }
+               /* Write it back out unchanged to initiate update NVM,
+                * which will force a write of the shadow (alt) RAM to
+                * the NVM - thus storing the bandwidth values permanently.
+                */
+               aq_status = i40e_aq_update_nvm(&pf->hw,
+                                              I40E_SR_NVM_CONTROL_WORD,
+                                              0x10, sizeof(nvm_word),
+                                              &nvm_word, true, NULL);
+               /* Save off last admin queue command status before releasing
+                * the NVM
+                */
+               last_aq_status = pf->hw.aq.asq_last_status;
+               i40e_release_nvm(&pf->hw);
+               if (aq_status)
+                       dev_info(&pf->pdev->dev,
+                                "BW settings NOT SAVED - error %d:%d updating NVM\n",
+                                aq_status, last_aq_status);
+       } else {
+               dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
+               dev_info(&pf->pdev->dev, "available commands\n");
+               dev_info(&pf->pdev->dev, "  add vsi [relay_seid]\n");
+               dev_info(&pf->pdev->dev, "  del vsi [vsi_seid]\n");
+               dev_info(&pf->pdev->dev, "  add relay <uplink_seid> <vsi_seid>\n");
+               dev_info(&pf->pdev->dev, "  del relay <relay_seid>\n");
+               dev_info(&pf->pdev->dev, "  add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
+               dev_info(&pf->pdev->dev, "  del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
+               dev_info(&pf->pdev->dev, "  add pvid <vsi_seid> <vid>\n");
+               dev_info(&pf->pdev->dev, "  del pvid <vsi_seid>\n");
+               dev_info(&pf->pdev->dev, "  dump switch\n");
+               dev_info(&pf->pdev->dev, "  dump vsi [seid]\n");
+               dev_info(&pf->pdev->dev, "  dump capabilities\n");
+               dev_info(&pf->pdev->dev, "  dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+               dev_info(&pf->pdev->dev, "  dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+               dev_info(&pf->pdev->dev, "  dump desc aq\n");
+               dev_info(&pf->pdev->dev, "  dump reset stats\n");
+               dev_info(&pf->pdev->dev, "  dump debug fwdata <cluster_id> <table_id> <index>\n");
+               dev_info(&pf->pdev->dev, "  msg_enable [level]\n");
+               dev_info(&pf->pdev->dev, "  read <reg>\n");
+               dev_info(&pf->pdev->dev, "  write <reg> <value>\n");
+               dev_info(&pf->pdev->dev, "  clear_stats vsi [seid]\n");
+               dev_info(&pf->pdev->dev, "  clear_stats port\n");
+               dev_info(&pf->pdev->dev, "  pfr\n");
+               dev_info(&pf->pdev->dev, "  corer\n");
+               dev_info(&pf->pdev->dev, "  globr\n");
+               dev_info(&pf->pdev->dev, "  defport on\n");
+               dev_info(&pf->pdev->dev, "  defport off\n");
+               dev_info(&pf->pdev->dev, "  send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
+               dev_info(&pf->pdev->dev, "  send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
+               dev_info(&pf->pdev->dev, "  add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+               dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+               dev_info(&pf->pdev->dev, "  fd current cnt\n");
+               dev_info(&pf->pdev->dev, "  add ethtype filter <ethtype> <to_queue>\n");
+               dev_info(&pf->pdev->dev, "  rem ethtype filter <ethtype> <to_queue>\n");
+               dev_info(&pf->pdev->dev, "  lldp start\n");
+               dev_info(&pf->pdev->dev, "  lldp stop\n");
+               dev_info(&pf->pdev->dev, "  lldp get local\n");
+               dev_info(&pf->pdev->dev, "  lldp get remote\n");
+               dev_info(&pf->pdev->dev, "  lldp event on\n");
+               dev_info(&pf->pdev->dev, "  lldp event off\n");
+               dev_info(&pf->pdev->dev, "  nvm read [module] [word_offset] [word_count]\n");
+               dev_info(&pf->pdev->dev, "  set rss_size <count>\n");
+               dev_info(&pf->pdev->dev, "  dcb off\n");
+               dev_info(&pf->pdev->dev, "  dcb on\n");
+               dev_info(&pf->pdev->dev, "  get bw\n");
+               dev_info(&pf->pdev->dev, "  set bw <MAX> <MIN>\n");
+               dev_info(&pf->pdev->dev, "  commit bw\n");
+       }
+
+command_write_done:
+       kfree(cmd_buf);
+       cmd_buf = NULL;
+       return count;
+}
+
+static const struct file_operations i40e_dbg_command_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = i40e_dbg_command_read,
+       .write = i40e_dbg_command_write,
+};
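The command file above follows the standard debugfs text-command pattern: simple_open() stores the inode's private pointer (here the PF) in filp->private_data, and the write handler copies in and parses the user's string. A minimal sketch of the same pattern, with hypothetical "demo" names standing in for the driver's:

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/uaccess.h>

    static ssize_t demo_write(struct file *filp, const char __user *buffer,
                              size_t count, loff_t *ppos)
    {
            char cmd[32];

            if (*ppos != 0 || count >= sizeof(cmd))
                    return -EINVAL;
            if (copy_from_user(cmd, buffer, count))
                    return -EFAULT;
            cmd[count] = '\0';
            pr_info("demo: got command '%s'\n", cmd);
            return count;
    }

    static const struct file_operations demo_fops = {
            .owner = THIS_MODULE,
            .open  = simple_open, /* filp->private_data = inode->i_private */
            .write = demo_write,
    };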
+
+/**************************************************************
+ * netdev_ops
+ * The netdev_ops entry in debugfs lets the user issue commands that
+ * exercise the driver's netdev operations.
+ **************************************************************/
+static char i40e_dbg_netdev_ops_buf[256] = "";
+
+/**
+ * i40e_dbg_netdev_ops_read - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
+                                       size_t count, loff_t *ppos)
+{
+       struct i40e_pf *pf = filp->private_data;
+       int bytes_not_copied;
+       int buf_size = 256;
+       char *buf;
+       int len;
+
+       /* don't allow partial reads */
+       if (*ppos != 0)
+               return 0;
+       if (count < buf_size)
+               return -ENOSPC;
+
+       buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!buf)
+               return -ENOSPC;
+
+       len = snprintf(buf, buf_size, "%s: %s\n",
+                      pf->vsi[pf->lan_vsi]->netdev->name,
+                      i40e_dbg_netdev_ops_buf);
+
+       bytes_not_copied = copy_to_user(buffer, buf, len);
+       kfree(buf);
+
+       if (bytes_not_copied)
+               return -EFAULT;
+
+       *ppos = len;
+       return len;
+}
+
+/**
+ * i40e_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
+                                        const char __user *buffer,
+                                        size_t count, loff_t *ppos)
+{
+       struct i40e_pf *pf = filp->private_data;
+       int bytes_not_copied;
+       struct i40e_vsi *vsi;
+       char *buf_tmp;
+       int vsi_seid;
+       int i, cnt;
+
+       /* don't allow partial writes */
+       if (*ppos != 0)
+               return 0;
+       if (count >= sizeof(i40e_dbg_netdev_ops_buf))
+               return -ENOSPC;
+
+       memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
+       bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
+                                         buffer, count);
+       if (bytes_not_copied)
+               return -EFAULT;
+       i40e_dbg_netdev_ops_buf[count] = '\0';
+
+       buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+       if (buf_tmp) {
+               *buf_tmp = '\0';
+               count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+       }
+
+       if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+               cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+               if (cnt != 1) {
+                       dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
+                       goto netdev_ops_write_done;
+               }
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev,
+                                "tx_timeout: VSI %d not found\n", vsi_seid);
+               } else if (!vsi->netdev) {
+                       dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
+                                vsi_seid);
+               } else if (test_bit(__I40E_DOWN, &vsi->state)) {
+                       dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
+                                vsi_seid);
+               } else if (rtnl_trylock()) {
+                       vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
+                       rtnl_unlock();
+                       dev_info(&pf->pdev->dev, "tx_timeout called\n");
+               } else {
+                       dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+               }
+       } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+               int mtu;
+
+               cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
+                            &vsi_seid, &mtu);
+               if (cnt != 2) {
+                       dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
+                       goto netdev_ops_write_done;
+               }
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev,
+                                "change_mtu: VSI %d not found\n", vsi_seid);
+               } else if (!vsi->netdev) {
+                       dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
+                                vsi_seid);
+               } else if (rtnl_trylock()) {
+                       vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
+                                                               mtu);
+                       rtnl_unlock();
+                       dev_info(&pf->pdev->dev, "change_mtu called\n");
+               } else {
+                       dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+               }
+
+       } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
+               cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+               if (cnt != 1) {
+                       dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
+                       goto netdev_ops_write_done;
+               }
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev,
+                                "set_rx_mode: VSI %d not found\n", vsi_seid);
+               } else if (!vsi->netdev) {
+                       dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
+                                vsi_seid);
+               } else if (rtnl_trylock()) {
+                       vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
+                       rtnl_unlock();
+                       dev_info(&pf->pdev->dev, "set_rx_mode called\n");
+               } else {
+                       dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+               }
+
+       } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
+               cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
+               if (cnt != 1) {
+                       dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
+                       goto netdev_ops_write_done;
+               }
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
+                                vsi_seid);
+               } else if (!vsi->netdev) {
+                       dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
+                                vsi_seid);
+               } else {
+                       for (i = 0; i < vsi->num_q_vectors; i++)
+                               napi_schedule(&vsi->q_vectors[i]->napi);
+                       dev_info(&pf->pdev->dev, "napi called\n");
+               }
+       } else if (strncmp(i40e_dbg_netdev_ops_buf,
+                          "toggle_tx_timeout", 17) == 0) {
+               cnt = sscanf(&i40e_dbg_netdev_ops_buf[17], "%i", &vsi_seid);
+               if (cnt != 1) {
+                       dev_info(&pf->pdev->dev, "toggle_tx_timeout <vsi_seid>\n");
+                       goto netdev_ops_write_done;
+               }
+               vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev, "toggle_tx_timeout: VSI %d not found\n",
+                                vsi_seid);
+               } else {
+                       vsi->block_tx_timeout = !vsi->block_tx_timeout;
+                       dev_info(&pf->pdev->dev, "toggle_tx_timeout: block_tx_timeout = %d\n",
+                                vsi->block_tx_timeout);
+               }
+       } else {
+               dev_info(&pf->pdev->dev, "unknown command '%s'\n",
+                        i40e_dbg_netdev_ops_buf);
+               dev_info(&pf->pdev->dev, "available commands\n");
+               dev_info(&pf->pdev->dev, "  tx_timeout <vsi_seid>\n");
+               dev_info(&pf->pdev->dev, "  change_mtu <vsi_seid> <mtu>\n");
+               dev_info(&pf->pdev->dev, "  set_rx_mode <vsi_seid>\n");
+               dev_info(&pf->pdev->dev, "  napi <vsi_seid>\n");
+               dev_info(&pf->pdev->dev, "  toggle_tx_timeout <vsi_seid>\n");
+       }
+netdev_ops_write_done:
+       return count;
+}
+
+static const struct file_operations i40e_dbg_netdev_ops_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = i40e_dbg_netdev_ops_read,
+       .write = i40e_dbg_netdev_ops_write,
+};
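All of the handlers above invoke ndo_* callbacks directly, so each one takes the RTNL lock first and, rather than sleeping, reports contention back to the user. A minimal sketch of that pattern (the demo name is hypothetical):

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static int demo_kick_rx_mode(struct net_device *netdev)
    {
            if (!rtnl_trylock())
                    return -EAGAIN; /* the "please try again" case above */
            netdev->netdev_ops->ndo_set_rx_mode(netdev);
            rtnl_unlock();
            return 0;
    }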
+
+/**
+ * i40e_dbg_pf_init - setup the debugfs directory for the PF
+ * @pf: the PF that is starting up
+ **/
+void i40e_dbg_pf_init(struct i40e_pf *pf)
+{
+       struct dentry *pfile;
+       const char *name = pci_name(pf->pdev);
+       const struct device *dev = &pf->pdev->dev;
+
+       pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
+       if (!pf->i40e_dbg_pf)
+               return;
+
+       pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+                                   &i40e_dbg_command_fops);
+       if (!pfile)
+               goto create_failed;
+
+       pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+                                   &i40e_dbg_dump_fops);
+       if (!pfile)
+               goto create_failed;
+
+       pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+                                   &i40e_dbg_netdev_ops_fops);
+       if (!pfile)
+               goto create_failed;
+
+       return;
+
+create_failed:
+       dev_info(dev, "debugfs dir/file for %s failed\n", name);
+       debugfs_remove_recursive(pf->i40e_dbg_pf);
+}
+
+/**
+ * i40e_dbg_pf_exit - clear out the PF's debugfs entries
+ * @pf: the PF that is stopping
+ **/
+void i40e_dbg_pf_exit(struct i40e_pf *pf)
+{
+       debugfs_remove_recursive(pf->i40e_dbg_pf);
+       pf->i40e_dbg_pf = NULL;
+
+       kfree(i40e_dbg_dump_buf);
+       i40e_dbg_dump_buf = NULL;
+}
+
+/**
+ * i40e_dbg_init - start up debugfs for the driver
+ **/
+void i40e_dbg_init(void)
+{
+       i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
+       if (!i40e_dbg_root)
+               pr_info("init of debugfs failed\n");
+}
+
+/**
+ * i40e_dbg_exit - clean out the driver's debugfs entries
+ **/
+void i40e_dbg_exit(void)
+{
+       debugfs_remove_recursive(i40e_dbg_root);
+       i40e_dbg_root = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
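Taken together, i40e_dbg_init() creates a driver-level debugfs directory and i40e_dbg_pf_init() adds one subdirectory per PF, named after pci_name(), holding the three 0600 files. Assuming debugfs is mounted at /sys/kernel/debug and taking 0000:02:00.0 as an example PCI address, the resulting layout is:

    /sys/kernel/debug/i40e/
            0000:02:00.0/
                    command
                    dump
                    netdev_ops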
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_devids.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_devids.h
new file mode 100644 (file)
index 0000000..eeaa4d8
--- /dev/null
@@ -0,0 +1,51 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID           0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710          0x1572
+#define I40E_DEV_ID_QEMU               0x1574
+#define I40E_DEV_ID_KX_A               0x157F
+#define I40E_DEV_ID_KX_B               0x1580
+#define I40E_DEV_ID_KX_C               0x1581
+#define I40E_DEV_ID_QSFP_A             0x1583
+#define I40E_DEV_ID_QSFP_B             0x1584
+#define I40E_DEV_ID_QSFP_C             0x1585
+#define I40E_DEV_ID_10G_BASE_T         0x1586
+#define I40E_DEV_ID_20G_KR2            0x1587
+#define I40E_DEV_ID_20G_KR2_A          0x1588
+#define I40E_DEV_ID_10G_BASE_T4                0x1589
+#define I40E_DEV_ID_VF                 0x154C
+#define I40E_DEV_ID_VF_HV              0x1571
+
+#define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
+                                        (d) == I40E_DEV_ID_QSFP_B  || \
+                                        (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
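A hypothetical caller showing how the i40e_is_40G_device() helper above is meant to be used against the device ID kept in the hw struct (the demo function is not part of the driver):

    /* Hypothetical example, not part of the driver */
    static bool demo_is_40g_port(struct i40e_hw *hw)
    {
            return i40e_is_40G_device(hw->device_id);
    }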
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_diag.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_diag.c
new file mode 100644 (file)
index 0000000..e9e65d7
--- /dev/null
@@ -0,0 +1,177 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_diag.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_diag_set_loopback
+ * @hw: pointer to the hw struct
+ * @mode: loopback mode
+ *
+ * Set chosen loopback mode
+ **/
+i40e_status i40e_diag_set_loopback(struct i40e_hw *hw,
+                                            enum i40e_lb_mode mode)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+
+       if (i40e_aq_set_lb_modes(hw, mode, NULL))
+               ret_code = I40E_ERR_DIAG_TEST_FAILED;
+
+       return ret_code;
+}
+
+/**
+ * i40e_diag_reg_pattern_test
+ * @hw: pointer to the hw struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ **/
+static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+                                                       u32 reg, u32 mask)
+{
+       const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+       u32 pat, val, orig_val;
+       int i;
+
+       orig_val = rd32(hw, reg);
+       for (i = 0; i < ARRAY_SIZE(patterns); i++) {
+               pat = patterns[i];
+               wr32(hw, reg, (pat & mask));
+               val = rd32(hw, reg);
+               if ((val & mask) != (pat & mask)) {
+#ifdef ETHTOOL_TEST
+                       i40e_debug(hw, I40E_DEBUG_DIAG,
+                                  "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
+                                  __func__, reg, pat, val);
+#endif
+                       return I40E_ERR_DIAG_TEST_FAILED;
+               }
+       }
+
+       wr32(hw, reg, orig_val);
+       val = rd32(hw, reg);
+       if (val != orig_val) {
+#ifdef ETHTOOL_TEST
+               i40e_debug(hw, I40E_DEBUG_DIAG,
+                          "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
+                          __func__, reg, orig_val, val);
+#endif
+               return I40E_ERR_DIAG_TEST_FAILED;
+       }
+
+       return I40E_SUCCESS;
+}
+
+struct i40e_diag_reg_test_info i40e_reg_list[] = {
+       /* offset               mask         elements   stride */
+       {I40E_QTX_CTL(0),       0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+       {I40E_PFINT_ITR0(0),    0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+       {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+       {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+       {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+       {I40E_PFINT_STAT_CTL0,  0x0000000C, 1, 0},
+       {I40E_PFINT_LNKLST0,    0x00001FFF, 1, 0},
+       {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+       {I40E_QINT_TQCTL(0),    0x000000FF, 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+       {I40E_QINT_RQCTL(0),    0x000000FF, 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+       {I40E_PFINT_ICR0_ENA,   0xF7F20000, 1, 0},
+       { 0 }
+};
+
+/**
+ * i40e_diag_reg_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform registers diagnostic test
+ **/
+i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       u32 reg, mask;
+       u32 i, j;
+
+       for (i = 0; i40e_reg_list[i].offset != 0 &&
+                                            ret_code == I40E_SUCCESS; i++) {
+
+               /* set actual reg range for dynamically allocated resources */
+               if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+                   hw->func_caps.num_tx_qp != 0)
+                       i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+               if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+                    i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+                    i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+                    i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+                    i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+                   hw->func_caps.num_msix_vectors != 0)
+                       i40e_reg_list[i].elements =
+                               hw->func_caps.num_msix_vectors - 1;
+
+               /* test register access */
+               mask = i40e_reg_list[i].mask;
+               for (j = 0; j < i40e_reg_list[i].elements &&
+                           ret_code == I40E_SUCCESS; j++) {
+                       reg = i40e_reg_list[i].offset
+                               + (j * i40e_reg_list[i].stride);
+                       ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+               }
+       }
+
+       return ret_code;
+}
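Each row of i40e_reg_list expands into "elements" registers located at offset + j * stride, so the I40E_PFINT_ITR0 row (3 elements) walks:

    j = 0: I40E_PFINT_ITR0(0)
    j = 1: I40E_PFINT_ITR0(0) + stride     == I40E_PFINT_ITR0(1)
    j = 2: I40E_PFINT_ITR0(0) + 2 * stride == I40E_PFINT_ITR0(2)

with stride = I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0), exactly as encoded in the table.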
+
+/**
+ * i40e_diag_eeprom_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform EEPROM diagnostic test
+ **/
+i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
+{
+       i40e_status ret_code;
+       u16 reg_val;
+
+       /* read NVM control word and if NVM valid, validate EEPROM checksum */
+       ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
+       if ((ret_code == I40E_SUCCESS) &&
+           ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
+            BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+               return i40e_validate_nvm_checksum(hw, NULL);
+       else
+               return I40E_ERR_DIAG_TEST_FAILED;
+}
+
+/**
+ * i40e_diag_fw_alive_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform FW alive diagnostic test
+ **/
+i40e_status i40e_diag_fw_alive_test(struct i40e_hw *hw)
+{
+       return I40E_SUCCESS;
+}
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_diag.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_diag.h
new file mode 100644 (file)
index 0000000..0475439
--- /dev/null
@@ -0,0 +1,54 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DIAG_H_
+#define _I40E_DIAG_H_
+
+#include "i40e_type.h"
+
+enum i40e_lb_mode {
+       I40E_LB_MODE_NONE       = 0x0,
+       I40E_LB_MODE_PHY_LOCAL  = I40E_AQ_LB_PHY_LOCAL,
+       I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+       I40E_LB_MODE_MAC_LOCAL  = I40E_AQ_LB_MAC_LOCAL,
+};
+
+struct i40e_diag_reg_test_info {
+       u32 offset;     /* the base register */
+       u32 mask;       /* bits that can be tested */
+       u32 elements;   /* number of elements if array */
+       u32 stride;     /* bytes between each element */
+};
+
+extern struct i40e_diag_reg_test_info i40e_reg_list[];
+
+i40e_status i40e_diag_set_loopback(struct i40e_hw *hw,
+                                            enum i40e_lb_mode mode);
+i40e_status i40e_diag_fw_alive_test(struct i40e_hw *hw);
+i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
+i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+
+#endif /* _I40E_DIAG_H_ */
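These diag entry points return 0 (I40E_SUCCESS) on pass. A hypothetical sketch of how a self-test caller might fold that into ethtool's nonzero-means-failure result convention; the driver's own caller lives in i40e_ethtool.c:

    /* Hypothetical glue, not the driver's actual self-test code */
    static u64 demo_run_reg_test(struct i40e_hw *hw)
    {
            return i40e_diag_reg_test(hw) ? 1 : 0;
    }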
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_ethtool.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_ethtool.c
new file mode 100644 (file)
index 0000000..db45651
--- /dev/null
@@ -0,0 +1,3635 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40e */
+
+#include "i40e.h"
+#include "i40e_diag.h"
+
+#ifdef SIOCETHTOOL
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+
+#endif
+#ifdef ETHTOOL_GSTATS
+struct i40e_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int sizeof_stat;
+       int stat_offset;
+};
+
+#define I40E_STAT(_type, _name, _stat) { \
+       .stat_string = _name, \
+       .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+       .stat_offset = offsetof(_type, _stat) \
+}
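A consumer of the I40E_STAT() tables resolves each entry by pointer arithmetic against the containing structure, since an entry carries only a name, a size, and a byte offset. A minimal sketch, assuming a u64-sized counter (the driver's real ethtool code also has to handle u32-sized fields):

    /* Minimal sketch; assumes s->sizeof_stat == sizeof(u64) */
    static u64 demo_fetch_stat(const void *base, const struct i40e_stats *s)
    {
            return *(const u64 *)((const char *)base + s->stat_offset);
    }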
+
+#ifdef HAVE_NDO_GET_STATS64
+#define I40E_NETDEV_STAT(_net_stat) \
+               I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
+#else
+#define I40E_NETDEV_STAT(_net_stat) \
+               I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
+#endif
+#define I40E_PF_STAT(_name, _stat) \
+               I40E_STAT(struct i40e_pf, _name, _stat)
+#define I40E_VSI_STAT(_name, _stat) \
+               I40E_STAT(struct i40e_vsi, _name, _stat)
+#define I40E_VEB_STAT(_name, _stat) \
+               I40E_STAT(struct i40e_veb, _name, _stat)
+
+static const struct i40e_stats i40e_gstrings_net_stats[] = {
+       I40E_NETDEV_STAT(rx_packets),
+       I40E_NETDEV_STAT(tx_packets),
+       I40E_NETDEV_STAT(rx_bytes),
+       I40E_NETDEV_STAT(tx_bytes),
+       I40E_NETDEV_STAT(rx_errors),
+       I40E_NETDEV_STAT(tx_errors),
+       I40E_NETDEV_STAT(rx_dropped),
+       I40E_NETDEV_STAT(tx_dropped),
+       I40E_NETDEV_STAT(collisions),
+       I40E_NETDEV_STAT(rx_length_errors),
+       I40E_NETDEV_STAT(rx_crc_errors),
+};
+
+static const struct i40e_stats i40e_gstrings_veb_stats[] = {
+       I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
+       I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
+       I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
+       I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
+       I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
+       I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
+       I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
+       I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
+       I40E_VEB_STAT("rx_discards", stats.rx_discards),
+       I40E_VEB_STAT("tx_discards", stats.tx_discards),
+       I40E_VEB_STAT("tx_errors", stats.tx_errors),
+       I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
+};
+
+static const struct i40e_stats i40e_gstrings_misc_stats[] = {
+       I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
+       I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
+       I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
+       I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+       I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
+       I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+       I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+       I40E_VSI_STAT("tx_linearize", tx_linearize),
+};
+
+/* These PF_STATs might look like duplicates of some NETDEV_STATs,
+ * but they are separate.  This device supports Virtualization, and
+ * as such might have several netdevs supporting VMDq and FCoE going
+ * through a single port.  The NETDEV_STATs are for individual netdevs
+ * seen at the top of the stack, and the PF_STATs are for the physical
+ * function at the bottom of the stack hosting those netdevs.
+ *
+ * The PF_STATs are appended to the netdev stats only when ethtool -S
+ * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
+ */
+static struct i40e_stats i40e_gstrings_stats[] = {
+       I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
+       I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
+       I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
+       I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
+       I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
+       I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
+       I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
+       I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
+       I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
+       I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
+       I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
+       I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
+       I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
+       I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
+       I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+       I40E_PF_STAT("tx_timeout", tx_timeout_count),
+       I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
+       I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
+       I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
+       I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
+       I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
+       I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+       I40E_PF_STAT("rx_size_64", stats.rx_size_64),
+       I40E_PF_STAT("rx_size_127", stats.rx_size_127),
+       I40E_PF_STAT("rx_size_255", stats.rx_size_255),
+       I40E_PF_STAT("rx_size_511", stats.rx_size_511),
+       I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
+       I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
+       I40E_PF_STAT("rx_size_big", stats.rx_size_big),
+       I40E_PF_STAT("tx_size_64", stats.tx_size_64),
+       I40E_PF_STAT("tx_size_127", stats.tx_size_127),
+       I40E_PF_STAT("tx_size_255", stats.tx_size_255),
+       I40E_PF_STAT("tx_size_511", stats.tx_size_511),
+       I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
+       I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
+       I40E_PF_STAT("tx_size_big", stats.tx_size_big),
+       I40E_PF_STAT("rx_undersize", stats.rx_undersize),
+       I40E_PF_STAT("rx_fragments", stats.rx_fragments),
+       I40E_PF_STAT("rx_oversize", stats.rx_oversize),
+       I40E_PF_STAT("rx_jabber", stats.rx_jabber),
+       I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+#ifdef HAVE_PTP_1588_CLOCK
+       I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+#endif /* HAVE_PTP_1588_CLOCK */
+       I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
+       I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+       I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+       I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
+       I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+       I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
+#ifdef I40E_ADD_PROBES
+       I40E_PF_STAT("tx_tcp_segments", tcp_segs),
+       I40E_PF_STAT("tx_tcp_cso", tx_tcp_cso),
+       I40E_PF_STAT("tx_udp_cso", tx_udp_cso),
+       I40E_PF_STAT("tx_sctp_cso", tx_sctp_cso),
+       I40E_PF_STAT("tx_ip4_cso", tx_ip4_cso),
+       I40E_PF_STAT("rx_tcp_cso", rx_tcp_cso),
+       I40E_PF_STAT("rx_udp_cso", rx_udp_cso),
+       I40E_PF_STAT("rx_sctp_cso", rx_sctp_cso),
+       I40E_PF_STAT("rx_ip4_cso", rx_ip4_cso),
+       I40E_PF_STAT("rx_tcp_cso_error", rx_tcp_cso_err),
+       I40E_PF_STAT("rx_udp_cso_error", rx_udp_cso_err),
+       I40E_PF_STAT("rx_sctp_cso_error", rx_sctp_cso_err),
+       I40E_PF_STAT("rx_ip4_cso_error", rx_ip4_cso_err),
+#endif
+
+       /* LPI stats */
+       I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
+       I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
+       I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
+       I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
+};
+
+#ifdef I40E_FCOE
+static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
+       I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
+       I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
+       I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
+       I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
+       I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
+       I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
+       I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
+       I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
+};
+
+#endif /* I40E_FCOE */
+#define I40E_QUEUE_STATS_LEN(n) \
+       (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+           * 2 /* tx and rx together */                                     \
+           * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
+#define I40E_GLOBAL_STATS_LEN  ARRAY_SIZE(i40e_gstrings_stats)
+#define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_MISC_STATS_LEN   ARRAY_SIZE(i40e_gstrings_misc_stats)
+#ifdef I40E_FCOE
+#define I40E_FCOE_STATS_LEN    ARRAY_SIZE(i40e_gstrings_fcoe_stats)
+#define I40E_VSI_STATS_LEN(n)  (I40E_NETDEV_STATS_LEN + \
+                                I40E_FCOE_STATS_LEN + \
+                                I40E_MISC_STATS_LEN + \
+                                I40E_QUEUE_STATS_LEN((n)))
+#else
+#define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
+                                I40E_MISC_STATS_LEN + \
+                                I40E_QUEUE_STATS_LEN((n)))
+#endif /* I40E_FCOE */
+#define I40E_PFC_STATS_LEN ( \
+               (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
+                FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
+                FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
+                FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
+                FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
+                / sizeof(u64))
+#define I40E_VEB_TC_STATS_LEN ( \
+               (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
+                FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
+                FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
+                FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
+                / sizeof(u64))
+#define I40E_VEB_STATS_LEN   ARRAY_SIZE(i40e_gstrings_veb_stats)
+#define I40E_VEB_STATS_TOTAL   (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
+#define I40E_PF_STATS_LEN(n)   (I40E_GLOBAL_STATS_LEN + \
+                                I40E_PFC_STATS_LEN + \
+                                I40E_VSI_STATS_LEN((n)))
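As a worked example of I40E_PFC_STATS_LEN above, assuming each of the five priority_* counters in struct i40e_pf is a u64[8] array (one entry per user priority):

    5 fields * 8 entries * sizeof(u64) = 320 bytes
    320 bytes / sizeof(u64)            = 40 PFC stats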
+
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+enum i40e_ethtool_test_id {
+       I40E_ETH_TEST_REG = 0,
+       I40E_ETH_TEST_EEPROM,
+       I40E_ETH_TEST_INTR,
+       I40E_ETH_TEST_LOOPBACK,
+       I40E_ETH_TEST_LINK,
+};
+
+static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
+       "Register test  (offline)",
+       "Eeprom test    (offline)",
+       "Interrupt test (offline)",
+       "Loopback test  (offline)",
+       "Link test   (on/offline)"
+};
+
+#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+
+#endif /* ETHTOOL_TEST */
+
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+       "MFP",
+       "LinkPolling",
+       "flow-director-atr",
+       "veb-stats",
+};
+
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
+
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+
+/**
+ * i40e_partition_setting_complaint - generic complaint for MFP restriction
+ * @pf: the PF struct
+ **/
+static void i40e_partition_setting_complaint(struct i40e_pf *pf)
+{
+       dev_info(&pf->pdev->dev,
+                "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
+}
+
+/**
+ * i40e_get_settings_link_up - Get the Link settings for when link is up
+ * @hw: hw structure
+ * @ecmd: ethtool command to fill in
+ * @netdev: network interface device structure
+ *
+ * Reports link settings that can be determined when link is up
+ **/
+static void i40e_get_settings_link_up(struct i40e_hw *hw,
+                                     struct ethtool_cmd *ecmd,
+                                     struct net_device *netdev)
+{
+       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+       u32 link_speed = hw_link_info->link_speed;
+
+       /* Initialize supported and advertised settings based on phy settings */
+       switch (hw_link_info->phy_type) {
+       case I40E_PHY_TYPE_40GBASE_CR4:
+       case I40E_PHY_TYPE_40GBASE_CR4_CU:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_40000baseCR4_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_40000baseCR4_Full;
+               break;
+       case I40E_PHY_TYPE_XLAUI:
+       case I40E_PHY_TYPE_XLPPI:
+       case I40E_PHY_TYPE_40GBASE_AOC:
+               ecmd->supported = SUPPORTED_40000baseCR4_Full;
+               break;
+       case I40E_PHY_TYPE_40GBASE_SR4:
+               ecmd->supported = SUPPORTED_40000baseSR4_Full;
+               break;
+       case I40E_PHY_TYPE_40GBASE_LR4:
+               ecmd->supported = SUPPORTED_40000baseLR4_Full;
+               break;
+       case I40E_PHY_TYPE_10GBASE_SR:
+       case I40E_PHY_TYPE_10GBASE_LR:
+       case I40E_PHY_TYPE_1000BASE_SX:
+       case I40E_PHY_TYPE_1000BASE_LX:
+               ecmd->supported = SUPPORTED_10000baseT_Full;
+               if (hw_link_info->module_type[2] & I40E_MODULE_TYPE_1000BASE_SX ||
+                   hw_link_info->module_type[2] & I40E_MODULE_TYPE_1000BASE_LX) {
+                       ecmd->supported |= SUPPORTED_1000baseT_Full;
+                       if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               }
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_10GBASE_T:
+       case I40E_PHY_TYPE_1000BASE_T:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_10000baseT_Full |
+                                 SUPPORTED_1000baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_1000baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_1000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_100BASE_TX:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_100baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+               break;
+       case I40E_PHY_TYPE_10GBASE_CR1_CU:
+       case I40E_PHY_TYPE_10GBASE_CR1:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_10000baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_10000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_XAUI:
+       case I40E_PHY_TYPE_XFI:
+       case I40E_PHY_TYPE_SFI:
+       case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+       case I40E_PHY_TYPE_10GBASE_AOC:
+               ecmd->supported = SUPPORTED_10000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_SGMII:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_1000baseT_Full;
+               if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               break;
+       default:
+               /* if we got here and link is up something bad is afoot */
+               netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+                           hw_link_info->phy_type);
+       }
+
+       /* Set speed and duplex */
+       switch (link_speed) {
+       case I40E_LINK_SPEED_40GB:
+               ethtool_cmd_speed_set(ecmd, SPEED_40000);
+               break;
+       case I40E_LINK_SPEED_20GB:
+               ethtool_cmd_speed_set(ecmd, SPEED_20000);
+               break;
+       case I40E_LINK_SPEED_10GB:
+               ethtool_cmd_speed_set(ecmd, SPEED_10000);
+               break;
+       case I40E_LINK_SPEED_1GB:
+               ethtool_cmd_speed_set(ecmd, SPEED_1000);
+               break;
+       case I40E_LINK_SPEED_100MB:
+               ethtool_cmd_speed_set(ecmd, SPEED_100);
+               break;
+       default:
+               break;
+       }
+       ecmd->duplex = DUPLEX_FULL;
+}
+
+/**
+ * i40e_get_settings_link_down - Get the Link settings for when link is down
+ * @hw: hw structure
+ * @ecmd: ethtool command to fill in
+ *
+ * Reports link settings that can be determined when link is down
+ **/
+static void i40e_get_settings_link_down(struct i40e_hw *hw,
+                                     struct ethtool_cmd *ecmd)
+{
+       enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
+
+       /* link is down and the driver needs to fall back on
+        * supported phy types to figure out what info to display
+        */
+       ecmd->supported = 0x0;
+       ecmd->advertising = 0x0;
+       if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                               SUPPORTED_1000baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_1000baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+           phy_types & I40E_CAP_PHY_TYPE_XFI ||
+           phy_types & I40E_CAP_PHY_TYPE_SFI ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+               ecmd->supported |= SUPPORTED_10000baseT_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+           phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_10000baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_10000baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+           phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+           phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+               ecmd->supported |= SUPPORTED_40000baseCR4_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+           phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                 SUPPORTED_40000baseCR4_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                   ADVERTISED_40000baseCR4_Full;
+       }
+       if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+           !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_100baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_100baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+           phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_1000baseT_Full;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_1000baseT_Full;
+       }
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+               ecmd->supported |= SUPPORTED_40000baseSR4_Full;
+       if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+               ecmd->supported |= SUPPORTED_40000baseLR4_Full;
+
+       /* With no link, speed and duplex are unknown */
+       ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+       ecmd->duplex = DUPLEX_UNKNOWN;
+}
+
+/**
+ * i40e_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings based on media_type
+ **/
+static int i40e_get_settings(struct net_device *netdev,
+                            struct ethtool_cmd *ecmd)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+       bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+
+       if (link_up)
+               i40e_get_settings_link_up(hw, ecmd, netdev);
+       else
+               i40e_get_settings_link_down(hw, ecmd);
+
+       /* Now set the settings that don't rely on link being up/down */
+
+       /* For backplane, supported and advertised are only reliant on the
+        * phy types the NVM specifies are supported.
+        */
+       if (hw->device_id == I40E_DEV_ID_KX_B ||
+           hw->device_id == I40E_DEV_ID_KX_C ||
+           hw->device_id == I40E_DEV_ID_20G_KR2 ||
+           hw->device_id == I40E_DEV_ID_20G_KR2_A) {
+               ecmd->supported = SUPPORTED_Autoneg;
+               ecmd->advertising = ADVERTISED_Autoneg;
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+                       ecmd->supported |= SUPPORTED_40000baseKR4_Full;
+                       ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+                       ecmd->supported |= SUPPORTED_20000baseKR2_Full;
+                       ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+                       ecmd->supported |= SUPPORTED_10000baseKR_Full;
+                       ecmd->advertising |= ADVERTISED_10000baseKR_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+                       ecmd->supported |= SUPPORTED_10000baseKX4_Full;
+                       ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
+               }
+               if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+                       ecmd->supported |= SUPPORTED_1000baseKX_Full;
+                       ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+               }
+       }
+
+       /* Set autoneg settings */
+       ecmd->autoneg = (hw_link_info->an_info & I40E_AQ_AN_COMPLETED ?
+                         AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+       /* Set media type settings */
+       switch (hw->phy.media_type) {
+       case I40E_MEDIA_TYPE_BACKPLANE:
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_Backplane;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_Backplane;
+               ecmd->port = PORT_NONE;
+               break;
+       case I40E_MEDIA_TYPE_BASET:
+               ecmd->supported |= SUPPORTED_TP;
+               ecmd->advertising |= ADVERTISED_TP;
+               ecmd->port = PORT_TP;
+               break;
+       case I40E_MEDIA_TYPE_DA:
+       case I40E_MEDIA_TYPE_CX4:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_FIBRE;
+               ecmd->port = PORT_DA;
+               break;
+       case I40E_MEDIA_TYPE_FIBER:
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->port = PORT_FIBRE;
+               break;
+       case I40E_MEDIA_TYPE_UNKNOWN:
+       default:
+               ecmd->port = PORT_OTHER;
+               break;
+       }
+
+       /* Set transceiver */
+       ecmd->transceiver = XCVR_EXTERNAL;
+
+       /* Set flow control settings */
+       ecmd->supported |= SUPPORTED_Pause;
+
+       switch (hw->fc.requested_mode) {
+       case I40E_FC_FULL:
+               ecmd->advertising |= ADVERTISED_Pause;
+               break;
+       case I40E_FC_TX_PAUSE:
+               ecmd->advertising |= ADVERTISED_Asym_Pause;
+               break;
+       case I40E_FC_RX_PAUSE:
+               ecmd->advertising |= (ADVERTISED_Pause |
+                                     ADVERTISED_Asym_Pause);
+               break;
+       default:
+               ecmd->advertising &= ~(ADVERTISED_Pause |
+                                      ADVERTISED_Asym_Pause);
+               break;
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_set_settings - Set Speed and Duplex
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Set speed/duplex per media_types advertised/forced
+ **/
+static int i40e_set_settings(struct net_device *netdev,
+                            struct ethtool_cmd *ecmd)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       struct i40e_aq_set_phy_config config;
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_hw *hw = &pf->hw;
+       struct ethtool_cmd safe_ecmd;
+       i40e_status status = 0;
+       bool change = false;
+       int err = 0;
+       u8 autoneg;
+       u32 advertise;
+       u32 old_ethtool_advertising = 0;
+
+       /* Changing port settings is not supported if this isn't the
+        * port's controlling PF
+        */
+       if (hw->partition_id != 1) {
+               i40e_partition_setting_complaint(pf);
+               return -EOPNOTSUPP;
+       }
+
+       if (vsi != pf->vsi[pf->lan_vsi])
+               return -EOPNOTSUPP;
+
+       if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
+           hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
+           hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
+           hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
+               return -EOPNOTSUPP;
+
+       if (hw->device_id == I40E_DEV_ID_KX_B ||
+           hw->device_id == I40E_DEV_ID_KX_C ||
+           hw->device_id == I40E_DEV_ID_20G_KR2 ||
+           hw->device_id == I40E_DEV_ID_20G_KR2_A) {
+               netdev_info(netdev, "Changing settings is not supported on backplane.\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* get our own copy of the bits to check against */
+       memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
+       i40e_get_settings(netdev, &safe_ecmd);
+
+       /* save autoneg and speed out of ecmd */
+       autoneg = ecmd->autoneg;
+       advertise = ecmd->advertising;
+
+       /* set autoneg and speed back to what they currently are */
+       ecmd->autoneg = safe_ecmd.autoneg;
+       ecmd->advertising = safe_ecmd.advertising;
+
+       /* Due to a bug in ethtool versions < 3.6 this check is necessary */
+       old_ethtool_advertising = ecmd->supported &
+                                 (ADVERTISED_10baseT_Half |
+                                  ADVERTISED_10baseT_Full |
+                                  ADVERTISED_100baseT_Half |
+                                  ADVERTISED_100baseT_Full |
+                                  ADVERTISED_1000baseT_Half |
+                                  ADVERTISED_1000baseT_Full |
+                                  ADVERTISED_2500baseX_Full |
+                                  ADVERTISED_10000baseT_Full);
+       old_ethtool_advertising |= (ADVERTISED_20000baseMLD2_Full |
+                                   ADVERTISED_20000baseKR2_Full);
+
+       if (advertise == old_ethtool_advertising)
+               netdev_info(netdev, "If you are not setting advertising to %x then you may have an old version of ethtool. Please update.\n",
+                           advertise);
+       ecmd->cmd = safe_ecmd.cmd;
+       /* If ecmd and safe_ecmd are not the same now, then the user is
+        * trying to set something that we do not support
+        */
+       if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
+               return -EOPNOTSUPP;
+
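+       /* wait out any configuration change already in progress on this VSI */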
+       while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
+               usleep_range(1000, 2000);
+
+       /* Get the current phy config */
+       status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+                                             NULL);
+       if (status)
+               return -EAGAIN;
+
+       /* Copy abilities to config in case autoneg is not
+        * set below
+        */
+       memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+       config.abilities = abilities.abilities;
+
+       /* Check autoneg */
+       if (autoneg == AUTONEG_ENABLE) {
+               /* If autoneg was not already enabled */
+               if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+                       /* If autoneg is not supported, return error */
+                       if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+                               netdev_info(netdev, "Autoneg not supported on this phy\n");
+                               return -EINVAL;
+                       }
+                       /* Autoneg is allowed to change */
+                       config.abilities = abilities.abilities |
+                                          I40E_AQ_PHY_ENABLE_AN;
+                       change = true;
+               }
+       } else {
+               /* If autoneg is currently enabled */
+               if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+                       /* If autoneg is supported, 10GBASE_T is the only PHY
+                        * that allows disabling it, so return an error for
+                        * any other PHY type
+                        */
+                       if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+                           hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
+                               netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+                               return -EINVAL;
+                       }
+                       /* Autoneg is allowed to change */
+                       config.abilities = abilities.abilities &
+                                          ~I40E_AQ_PHY_ENABLE_AN;
+                       change = true;
+               }
+       }
+
+       if (advertise & ~safe_ecmd.supported)
+               return -EINVAL;
+
+       if (advertise & ADVERTISED_100baseT_Full)
+               config.link_speed |= I40E_LINK_SPEED_100MB;
+       if (advertise & ADVERTISED_1000baseT_Full ||
+           advertise & ADVERTISED_1000baseKX_Full)
+               config.link_speed |= I40E_LINK_SPEED_1GB;
+       if (advertise & ADVERTISED_10000baseT_Full ||
+           advertise & ADVERTISED_10000baseKX4_Full ||
+           advertise & ADVERTISED_10000baseKR_Full)
+               config.link_speed |= I40E_LINK_SPEED_10GB;
+       if (advertise & ADVERTISED_20000baseKR2_Full)
+               config.link_speed |= I40E_LINK_SPEED_20GB;
+       if (advertise & ADVERTISED_40000baseKR4_Full ||
+           advertise & ADVERTISED_40000baseCR4_Full ||
+           advertise & ADVERTISED_40000baseSR4_Full ||
+           advertise & ADVERTISED_40000baseLR4_Full)
+               config.link_speed |= I40E_LINK_SPEED_40GB;
+
+       /* If speed didn't get set, set it to what it currently is.
+        * This is needed because if advertise is 0 (as it is when autoneg
+        * is disabled) then speed won't get set.
+        */
+       if (!config.link_speed)
+               config.link_speed = abilities.link_speed;
+
+       if (change || (abilities.link_speed != config.link_speed)) {
+               /* copy over the rest of the abilities */
+               config.phy_type = abilities.phy_type;
+               config.eee_capability = abilities.eee_capability;
+               config.eeer = abilities.eeer_val;
+               config.low_power_ctrl = abilities.d3_lpan;
+
+               /* save the requested speeds */
+               hw->phy.link_info.requested_speeds = config.link_speed;
+               /* set link and autoneg so changes take effect */
+               config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+               /* If link is up put link down */
+               if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
+                       /* Tell the OS link is going down; the link will come
+                        * back up asynchronously when firmware reports ready
+                        */
+                       i40e_print_link_message(vsi, false);
+                       netif_carrier_off(netdev);
+                       netif_tx_stop_all_queues(netdev);
+               }
+
+               /* make the aq call */
+               status = i40e_aq_set_phy_config(hw, &config, NULL);
+               if (status) {
+                       netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+                                   i40e_stat_str(hw, status),
+                                   i40e_aq_str(hw, hw->aq.asq_last_status));
+                       return -EAGAIN;
+               }
+
+               status = i40e_update_link_info(hw);
+               if (status)
+                       netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
+                                   i40e_stat_str(hw, status),
+                                   i40e_aq_str(hw, hw->aq.asq_last_status));
+
+       } else {
+               netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
+       }
+
+       return err;
+}
+
+static int i40e_nway_reset(struct net_device *netdev)
+{
+       /* restart autonegotiation */
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+       i40e_status ret = 0;
+
+       ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
+       if (ret) {
+               netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+                           i40e_stat_str(hw, ret),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_get_pauseparam - Get Flow Control status
+ * @netdev: network interface device structure
+ * @pause: buffer in which to return tx/rx pause status
+ **/
+static void i40e_get_pauseparam(struct net_device *netdev,
+                               struct ethtool_pauseparam *pause)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+
+       pause->autoneg =
+               ((hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ?
+                AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+       /* PFC enabled so report LFC as off */
+       if (dcbx_cfg->pfc.pfcenable) {
+               pause->rx_pause = 0;
+               pause->tx_pause = 0;
+               return;
+       }
+
+       if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
+               pause->rx_pause = 1;
+       } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
+               pause->tx_pause = 1;
+       } else if (hw->fc.current_mode == I40E_FC_FULL) {
+               pause->rx_pause = 1;
+               pause->tx_pause = 1;
+       }
+}
+
+/**
+ * i40e_set_pauseparam - Set Flow Control parameter
+ * @netdev: network interface device structure
+ * @pause: requested tx/rx flow control parameters
+ **/
+static int i40e_set_pauseparam(struct net_device *netdev,
+                              struct ethtool_pauseparam *pause)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+       struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+       bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+       i40e_status status;
+       u8 aq_failures;
+       int err = 0;
+
+       /* Changing the port's flow control is not supported if this isn't the
+        * port's controlling PF
+        */
+       if (hw->partition_id != 1) {
+               i40e_partition_setting_complaint(pf);
+               return -EOPNOTSUPP;
+       }
+
+       if (vsi != pf->vsi[pf->lan_vsi])
+               return -EOPNOTSUPP;
+
+       if (pause->autoneg !=
+           ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+            AUTONEG_ENABLE : AUTONEG_DISABLE)) {
+               netdev_info(netdev,
+                       "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* If we have link and don't have autoneg */
+       if (!test_bit(__I40E_DOWN, &pf->state) &&
+           !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
+               /* Send message that it might not necessarily work */
+               netdev_info(netdev,
+                        "Autoneg did not complete so changing settings may not result in an actual change.\n");
+       }
+
+       if (dcbx_cfg->pfc.pfcenable) {
+               netdev_info(netdev,
+                        "Priority flow control enabled. Cannot set link flow control.\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (pause->rx_pause && pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_FULL;
+       else if (pause->rx_pause && !pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_RX_PAUSE;
+       else if (!pause->rx_pause && pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_TX_PAUSE;
+       else if (!pause->rx_pause && !pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_NONE;
+       else
+                return -EINVAL;
+
+       /* Tell the OS link is going down; the link will come back up
+        * asynchronously when firmware reports ready
+        */
+       i40e_print_link_message(vsi, false);
+       netif_carrier_off(netdev);
+       netif_tx_stop_all_queues(netdev);
+
+       /* Set the fc mode and restart autoneg only if link is up */
+       status = i40e_set_fc(hw, &aq_failures, link_up);
+
+       if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
+               netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+                        i40e_stat_str(hw, status),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+               err = -EAGAIN;
+       }
+       if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
+               netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+                        i40e_stat_str(hw, status),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+               err = -EAGAIN;
+       }
+       if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
+               netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+                        i40e_stat_str(hw, status),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+               err = -EAGAIN;
+       }
+
+       if (!test_bit(__I40E_DOWN, &pf->state)) {
+               /* Give it a little more time to try to come back */
+               msleep(75);
+               if (!test_bit(__I40E_DOWN, &pf->state))
+                       return i40e_nway_reset(netdev);
+       }
+
+       return err;
+}
+
+#ifndef HAVE_NDO_SET_FEATURES
+static u32 i40e_get_rx_csum(struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+
+       return pf->flags & I40E_FLAG_RX_CSUM_ENABLED;
+}
+
+static int i40e_set_rx_csum(struct net_device *netdev, u32 data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+
+       if (data)
+               pf->flags |= I40E_FLAG_RX_CSUM_ENABLED;
+       else
+               pf->flags &= ~I40E_FLAG_RX_CSUM_ENABLED;
+
+       return 0;
+}
+
+static u32 i40e_get_tx_csum(struct net_device *netdev)
+{
+       return (netdev->features & NETIF_F_IP_CSUM) != 0;
+}
+
+static int i40e_set_tx_csum(struct net_device *netdev, u32 data)
+{
+       if (data) {
+#ifdef NETIF_F_IPV6_CSUM
+               netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+#else
+               netdev->features |= NETIF_F_IP_CSUM;
+#endif
+               netdev->features |= NETIF_F_SCTP_CSUM;
+       } else {
+#ifdef NETIF_F_IPV6_CSUM
+               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                     NETIF_F_SCTP_CSUM);
+#else
+               netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SCTP_CSUM);
+#endif
+       }
+
+       return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int i40e_set_tso(struct net_device *netdev, u32 data)
+{
+       if (data) {
+               netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+               netdev->features |= NETIF_F_TSO6;
+#endif
+       } else {
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+               struct i40e_netdev_priv *np = netdev_priv(netdev);
+               /* disable TSO on all VLANs if they're present */
+               if (np->vsi->vlgrp) {
+                       int i;
+                       struct net_device *v_netdev;
+                       for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+                               v_netdev =
+                                      vlan_group_get_device(np->vsi->vlgrp, i);
+                               if (v_netdev) {
+                                       v_netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+                                       v_netdev->features &= ~NETIF_F_TSO6;
+#endif
+                                       vlan_group_set_device(np->vsi->vlgrp, i,
+                                                             v_netdev);
+                               }
+                       }
+               }
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+               netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+               netdev->features &= ~NETIF_F_TSO6;
+#endif
+       }
+
+       return 0;
+}
+#endif /* NETIF_F_TSO */
+#ifdef ETHTOOL_GFLAGS
+static int i40e_set_flags(struct net_device *netdev, u32 data)
+{
+#ifdef ETHTOOL_GRXRINGS
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+#endif
+       u32 supported_flags = 0;
+       bool need_reset = false;
+       int rc;
+
+#ifdef NETIF_F_RXHASH
+       supported_flags |= ETH_FLAG_RXHASH;
+#endif
+#ifdef ETHTOOL_GRXRINGS
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               supported_flags |= ETH_FLAG_NTUPLE;
+#endif
+       rc = ethtool_op_set_flags(netdev, data, supported_flags);
+       if (rc)
+               return rc;
+
+       /* if state changes we need to update pf->flags and maybe reset */
+#ifdef ETHTOOL_GRXRINGS
+       need_reset = i40e_set_ntuple(pf, netdev->features);
+#endif /* ETHTOOL_GRXRINGS */
+       if (need_reset)
+               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+
+       return 0;
+}
+#endif /* ETHTOOL_GFLAGS */
+
+#endif /* HAVE_NDO_SET_FEATURES */
+static u32 i40e_get_msglevel(struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+
+       return pf->msg_enable;
+}
+
+static void i40e_set_msglevel(struct net_device *netdev, u32 data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+
+       if (I40E_DEBUG_USER & data)
+               pf->hw.debug_mask = data;
+       pf->msg_enable = data;
+}
+
+static int i40e_get_regs_len(struct net_device *netdev)
+{
+       int reg_count = 0;
+       int i;
+
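+       /* the register table is terminated by an entry with a zero offset */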
+       for (i = 0; i40e_reg_list[i].offset != 0; i++)
+               reg_count += i40e_reg_list[i].elements;
+
+       return reg_count * sizeof(u32);
+}
+
+static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+                         void *p)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 *reg_buf = p;
+       int i, j, ri;
+       u32 reg;
+
+       /* Tell ethtool which driver-version-specific regs output we have.
+        *
+        * At some point, if we have ethtool doing special formatting of
+        * this data, it will rely on this version number to know how to
+        * interpret things.  Hence, this needs to be updated if/when the
+        * diags register table is changed.
+        */
+       regs->version = 1;
+
+       /* loop through the diags reg table for what to print */
+       ri = 0;
+       for (i = 0; i40e_reg_list[i].offset != 0; i++) {
+               for (j = 0; j < i40e_reg_list[i].elements; j++) {
+                       reg = i40e_reg_list[i].offset
+                               + (j * i40e_reg_list[i].stride);
+                       reg_buf[ri++] = rd32(hw, reg);
+               }
+       }
+}
+
+static int i40e_get_eeprom(struct net_device *netdev,
+                          struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_hw *hw = &np->vsi->back->hw;
+       struct i40e_pf *pf = np->vsi->back;
+       int ret_val = 0, len, offset;
+       u8 *eeprom_buff;
+       u16 i, sectors;
+       bool last;
+       u32 magic;
+
+#define I40E_NVM_SECTOR_SIZE  4096
+       if (eeprom->len == 0)
+               return -EINVAL;
+
+       /* check for NVMUpdate access method */
+       magic = hw->vendor_id | (hw->device_id << 16);
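+       /* A magic of vendor|device selects the normal read path below; any
+        * other non-zero magic is treated as an NVMUpdate tool command, with
+        * the device id in the upper 16 bits validated first.
+        */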
+       if (eeprom->magic && eeprom->magic != magic) {
+               struct i40e_nvm_access *cmd;
+               int errno;
+
+               /* make sure it is the right magic for NVMUpdate */
+               if ((eeprom->magic >> 16) != hw->device_id)
+                       return -EINVAL;
+
+               cmd = (struct i40e_nvm_access *)eeprom;
+               ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
+               if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
+                       dev_info(&pf->pdev->dev,
+                                "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
+                                ret_val, hw->aq.asq_last_status, errno,
+                                (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
+                                cmd->offset, cmd->data_size);
+
+               return errno;
+       }
+
+       /* normal ethtool get_eeprom support */
+       eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+       eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
+       if (!eeprom_buff)
+               return -ENOMEM;
+
+       ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+       if (ret_val) {
+               dev_info(&pf->pdev->dev,
+                        "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+                        ret_val, hw->aq.asq_last_status);
+               goto free_buff;
+       }
+
+       sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
+       sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
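+       /* e.g. a 10000-byte request rounds up to 3 sectors of
+        * 4096 + 4096 + 1808 bytes, with "last" set on the final read
+        */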
+       len = I40E_NVM_SECTOR_SIZE;
+       last = false;
+       for (i = 0; i < sectors; i++) {
+               if (i == (sectors - 1)) {
+                       len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
+                       last = true;
+               }
+               offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
+               ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
+                               (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+                               last, NULL);
+               if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+                       dev_info(&pf->pdev->dev,
+                                "read NVM failed, invalid offset 0x%x\n",
+                                offset);
+                       break;
+               } else if (ret_val &&
+                          hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
+                       dev_info(&pf->pdev->dev,
+                                "read NVM failed, access, offset 0x%x\n",
+                                offset);
+                       break;
+               } else if (ret_val) {
+                       dev_info(&pf->pdev->dev,
+                                "read NVM failed offset %d err=%d status=0x%x\n",
+                                offset, ret_val, hw->aq.asq_last_status);
+                       break;
+               }
+       }
+
+       i40e_release_nvm(hw);
+       memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
+free_buff:
+       kfree(eeprom_buff);
+       return ret_val;
+}
+
+static int i40e_get_eeprom_len(struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_hw *hw = &np->vsi->back->hw;
+       u32 val;
+
+       val = (rd32(hw, I40E_GLPCI_LBARCTRL)
+               & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
+               >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+       /* the register reports flash size as a power of 2, in 64KB chunks */
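+       /* e.g. a field value of 3 yields 64KB * 2^3 = 512KB */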
+       val = (64 * 1024) * BIT(val);
+       return val;
+}
+
+static int i40e_set_eeprom(struct net_device *netdev,
+                          struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_hw *hw = &np->vsi->back->hw;
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_nvm_access *cmd;
+       int ret_val = 0;
+       int errno;
+       u32 magic;
+
+       /* normal ethtool set_eeprom is not supported */
+       magic = hw->vendor_id | (hw->device_id << 16);
+       if (eeprom->magic == magic)
+               return -EOPNOTSUPP;
+
+       /* check for NVMUpdate access method */
+       if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
+               return -EINVAL;
+
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+           test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+               return -EBUSY;
+
+       cmd = (struct i40e_nvm_access *)eeprom;
+       ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
+       if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
+               dev_info(&pf->pdev->dev,
+                        "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
+                        ret_val, hw->aq.asq_last_status, errno,
+                        (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
+                        cmd->offset, cmd->data_size);
+
+       return errno;
+}
+
+static void i40e_get_drvinfo(struct net_device *netdev,
+                            struct ethtool_drvinfo *drvinfo)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+
+       strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, i40e_driver_version_str,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
+               sizeof(drvinfo->fw_version));
+       strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+               sizeof(drvinfo->bus_info));
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+       drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
+#endif
+}
+
+static void i40e_get_ringparam(struct net_device *netdev,
+                              struct ethtool_ringparam *ring)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+       ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+       ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+       ring->rx_mini_max_pending = 0;
+       ring->rx_jumbo_max_pending = 0;
+       ring->rx_pending = vsi->rx_rings[0]->count;
+       ring->tx_pending = vsi->tx_rings[0]->count;
+       ring->rx_mini_pending = 0;
+       ring->rx_jumbo_pending = 0;
+}
+
+static int i40e_set_ringparam(struct net_device *netdev,
+                             struct ethtool_ringparam *ring)
+{
+       struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       u32 new_rx_count, new_tx_count;
+       int i, err = 0;
+
+       if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+               return -EINVAL;
+
+       if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+           ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
+           ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+           ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
+               netdev_info(netdev,
+                           "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+                           ring->tx_pending, ring->rx_pending,
+                           I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+               return -EINVAL;
+       }
+
+       new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
+       new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
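+       /* ALIGN() rounds each count up to the next descriptor multiple */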
+
+       /* if nothing to do return success */
+       if ((new_tx_count == vsi->tx_rings[0]->count) &&
+           (new_rx_count == vsi->rx_rings[0]->count))
+               return 0;
+
+       while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+               usleep_range(1000, 2000);
+
+       if (!netif_running(vsi->netdev)) {
+               /* simple case - set for the next time the netdev is started */
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       vsi->tx_rings[i]->count = new_tx_count;
+                       vsi->rx_rings[i]->count = new_rx_count;
+               }
+               goto done;
+       }
+
+       /* We can't just free everything and then setup again,
+        * because the ISRs in MSI-X mode get passed pointers
+        * to the Tx and Rx ring structs.
+        */
+
+       /* alloc updated Tx resources */
+       if (new_tx_count != vsi->tx_rings[0]->count) {
+               netdev_info(netdev,
+                           "Changing Tx descriptor count from %d to %d.\n",
+                           vsi->tx_rings[0]->count, new_tx_count);
+               tx_rings = kcalloc(vsi->alloc_queue_pairs,
+                                  sizeof(struct i40e_ring), GFP_KERNEL);
+               if (!tx_rings) {
+                       err = -ENOMEM;
+                       goto done;
+               }
+
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       /* clone ring and setup updated count */
+                       tx_rings[i] = *vsi->tx_rings[i];
+                       tx_rings[i].count = new_tx_count;
+                       /* the desc and bi pointers will be reallocated in the
+                        * setup call
+                        */
+                       tx_rings[i].desc = NULL;
+                       tx_rings[i].rx_bi = NULL;
+                       err = i40e_setup_tx_descriptors(&tx_rings[i]);
+                       if (err) {
+                               while (i) {
+                                       i--;
+                                       i40e_free_tx_resources(&tx_rings[i]);
+                               }
+                               kfree(tx_rings);
+                               tx_rings = NULL;
+
+                               goto done;
+                       }
+               }
+       }
+
+       /* alloc updated Rx resources */
+       if (new_rx_count != vsi->rx_rings[0]->count) {
+               netdev_info(netdev,
+                           "Changing Rx descriptor count from %d to %d\n",
+                           vsi->rx_rings[0]->count, new_rx_count);
+               rx_rings = kcalloc(vsi->alloc_queue_pairs,
+                                  sizeof(struct i40e_ring), GFP_KERNEL);
+               if (!rx_rings) {
+                       err = -ENOMEM;
+                       goto free_tx;
+               }
+
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       /* clone ring and setup updated count */
+                       rx_rings[i] = *vsi->rx_rings[i];
+                       rx_rings[i].count = new_rx_count;
+                       /* the desc and bi pointers will be reallocated in the
+                        * setup call
+                        */
+                       rx_rings[i].desc = NULL;
+                       rx_rings[i].rx_bi = NULL;
+                       err = i40e_setup_rx_descriptors(&rx_rings[i]);
+                       if (err) {
+                               while (i) {
+                                       i--;
+                                       i40e_free_rx_resources(&rx_rings[i]);
+                               }
+                               kfree(rx_rings);
+                               rx_rings = NULL;
+
+                               goto free_tx;
+                       }
+               }
+       }
+
+       /* Bring interface down, copy in the new ring info,
+        * then restore the interface
+        */
+       i40e_down(vsi);
+
+       if (tx_rings) {
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       i40e_free_tx_resources(vsi->tx_rings[i]);
+                       *vsi->tx_rings[i] = tx_rings[i];
+               }
+               kfree(tx_rings);
+               tx_rings = NULL;
+       }
+
+       if (rx_rings) {
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       i40e_free_rx_resources(vsi->rx_rings[i]);
+                       *vsi->rx_rings[i] = rx_rings[i];
+               }
+               kfree(rx_rings);
+               rx_rings = NULL;
+       }
+
+       i40e_up(vsi);
+
+free_tx:
+       /* error cleanup if the Rx allocations failed after getting Tx */
+       if (tx_rings) {
+               for (i = 0; i < vsi->num_queue_pairs; i++)
+                       i40e_free_tx_resources(&tx_rings[i]);
+               kfree(tx_rings);
+               tx_rings = NULL;
+       }
+
+done:
+       clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+
+       return err;
+}
+
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+static int i40e_get_stats_count(struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+
+       if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
+               if ((pf->lan_veb != I40E_NO_VEB) &&
+                   (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
+                       return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL;
+               else
+                       return I40E_PF_STATS_LEN(netdev);
+       } else {
+               return I40E_VSI_STATS_LEN(netdev);
+       }
+}
+
+#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
+static int i40e_get_sset_count(struct net_device *netdev, int sset)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+
+       switch (sset) {
+       case ETH_SS_TEST:
+               return I40E_TEST_LEN;
+       case ETH_SS_STATS:
+               if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
+                       int len = I40E_PF_STATS_LEN(netdev);
+
+                       if ((pf->lan_veb != I40E_NO_VEB) &&
+                           (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
+                               len += I40E_VEB_STATS_TOTAL;
+                       return len;
+               } else {
+                       return I40E_VSI_STATS_LEN(netdev);
+               }
+       case ETH_SS_PRIV_FLAGS:
+               return I40E_PRIV_FLAGS_STR_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+static void i40e_get_ethtool_stats(struct net_device *netdev,
+                                  struct ethtool_stats *stats, u64 *data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_ring *tx_ring, *rx_ring;
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       int i = 0;
+       char *p;
+       int j;
+
+#ifdef HAVE_NDO_GET_STATS64
+       struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+       unsigned int start;
+#else
+       struct net_device_stats *net_stats = i40e_get_vsi_stats_struct(vsi);
+#endif
+
+       i40e_update_stats(vsi);
+
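+       /* each stats-table entry carries an offset into the source struct
+        * and the field width; copy the field out as a u64 or u32
+        */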
+       for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
+               p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
+               data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
+                       sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+       }
+       for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
+               p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
+               data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
+                       sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+       }
+#ifdef I40E_FCOE
+       for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
+               p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
+               data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
+                       sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+       }
+#endif
+       rcu_read_lock();
+       for (j = 0; j < vsi->num_queue_pairs; j++) {
+               tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+
+               if (!tx_ring)
+                       continue;
+
+               /* process Tx ring statistics */
+#ifdef HAVE_NDO_GET_STATS64
+               do {
+                       start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
+#endif
+                       data[i] = tx_ring->stats.packets;
+                       data[i + 1] = tx_ring->stats.bytes;
+#ifdef HAVE_NDO_GET_STATS64
+               } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+#endif
+               i += 2;
+
+               /* Rx ring is the 2nd half of the queue pair */
+               rx_ring = &tx_ring[1];
+#ifdef HAVE_NDO_GET_STATS64
+               do {
+                       start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
+#endif
+                       data[i] = rx_ring->stats.packets;
+                       data[i + 1] = rx_ring->stats.bytes;
+#ifdef HAVE_NDO_GET_STATS64
+               } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+#endif
+               i += 2;
+       }
+       rcu_read_unlock();
+       if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
+               if ((pf->lan_veb != I40E_NO_VEB) &&
+                   (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
+                       struct i40e_veb *veb = pf->veb[pf->lan_veb];
+
+                       for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
+                               p = (char *)veb + i40e_gstrings_veb_stats[j].stat_offset;
+                               data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
+                                          sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+                       }
+                       for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
+                               data[i++] = veb->tc_stats.tc_tx_packets[j];
+                               data[i++] = veb->tc_stats.tc_tx_bytes[j];
+                               data[i++] = veb->tc_stats.tc_rx_packets[j];
+                               data[i++] = veb->tc_stats.tc_rx_bytes[j];
+                       }
+               }
+               for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+                       p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+                       data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+                                  sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+               }
+               for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+                       data[i++] = pf->stats.priority_xon_tx[j];
+                       data[i++] = pf->stats.priority_xoff_tx[j];
+               }
+               for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+                       data[i++] = pf->stats.priority_xon_rx[j];
+                       data[i++] = pf->stats.priority_xoff_rx[j];
+               }
+               for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+                       data[i++] = pf->stats.priority_xon_2_xoff[j];
+       }
+}
+
+static void i40e_get_strings(struct net_device *netdev, u32 stringset,
+                            u8 *data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       char *p = (char *)data;
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_TEST:
+               for (i = 0; i < I40E_TEST_LEN; i++) {
+                       memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
+                       data += ETH_GSTRING_LEN;
+               }
+               break;
+       case ETH_SS_STATS:
+               for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
+                       snprintf(p, ETH_GSTRING_LEN, "%s",
+                                i40e_gstrings_net_stats[i].stat_string);
+                       p += ETH_GSTRING_LEN;
+               }
+               for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
+                       snprintf(p, ETH_GSTRING_LEN, "%s",
+                                i40e_gstrings_misc_stats[i].stat_string);
+                       p += ETH_GSTRING_LEN;
+               }
+#ifdef I40E_FCOE
+               for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
+                       snprintf(p, ETH_GSTRING_LEN, "%s",
+                                i40e_gstrings_fcoe_stats[i].stat_string);
+                       p += ETH_GSTRING_LEN;
+               }
+#endif
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
+                       p += ETH_GSTRING_LEN;
+               }
+               if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
+                       if ((pf->lan_veb != I40E_NO_VEB) &&
+                           (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
+                               for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
+                                       snprintf(p, ETH_GSTRING_LEN, "veb.%s",
+                                                i40e_gstrings_veb_stats[i].stat_string);
+                                       p += ETH_GSTRING_LEN;
+                               }
+                               for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                                "veb.tc_%u_tx_packets", i);
+                                       p += ETH_GSTRING_LEN;
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                                "veb.tc_%u_tx_bytes", i);
+                                       p += ETH_GSTRING_LEN;
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                                "veb.tc_%u_rx_packets", i);
+                                       p += ETH_GSTRING_LEN;
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                                "veb.tc_%u_rx_bytes", i);
+                                       p += ETH_GSTRING_LEN;
+                               }
+                       }
+                       for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+                               snprintf(p, ETH_GSTRING_LEN, "port.%s",
+                                        i40e_gstrings_stats[i].stat_string);
+                               p += ETH_GSTRING_LEN;
+                       }
+                       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+                               snprintf(p, ETH_GSTRING_LEN,
+                                        "port.tx_priority_%u_xon", i);
+                               p += ETH_GSTRING_LEN;
+                               snprintf(p, ETH_GSTRING_LEN,
+                                        "port.tx_priority_%u_xoff", i);
+                               p += ETH_GSTRING_LEN;
+                       }
+                       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+                               snprintf(p, ETH_GSTRING_LEN,
+                                        "port.rx_priority_%u_xon", i);
+                               p += ETH_GSTRING_LEN;
+                               snprintf(p, ETH_GSTRING_LEN,
+                                        "port.rx_priority_%u_xoff", i);
+                               p += ETH_GSTRING_LEN;
+                       }
+                       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+                               snprintf(p, ETH_GSTRING_LEN,
+                                        "port.rx_priority_%u_xon_2_xoff", i);
+                               p += ETH_GSTRING_LEN;
+                       }
+               }
+               /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
+               break;
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+       case ETH_SS_PRIV_FLAGS:
+               for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+                       memcpy(data, i40e_priv_flags_strings[i],
+                              ETH_GSTRING_LEN);
+                       data += ETH_GSTRING_LEN;
+               }
+               break;
+#endif
+       default:
+               break;
+       }
+}
+
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+static int i40e_get_ts_info(struct net_device *dev,
+                           struct ethtool_ts_info *info)
+{
+#ifdef HAVE_PTP_1588_CLOCK
+       struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+       /* only report HW timestamping if PTP is enabled */
+       if (!(pf->flags & I40E_FLAG_PTP))
+               return ethtool_op_get_ts_info(dev, info);
+
+       info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                               SOF_TIMESTAMPING_RX_SOFTWARE |
+                               SOF_TIMESTAMPING_SOFTWARE |
+                               SOF_TIMESTAMPING_TX_HARDWARE |
+                               SOF_TIMESTAMPING_RX_HARDWARE |
+                               SOF_TIMESTAMPING_RAW_HARDWARE;
+
+       if (pf->ptp_clock)
+               info->phc_index = ptp_clock_index(pf->ptp_clock);
+       else
+               info->phc_index = -1;
+
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+       return 0;
+#else /* HAVE_PTP_1588_CLOCK */
+       return ethtool_op_get_ts_info(dev, info);
+#endif /* HAVE_PTP_1588_CLOCK */
+}
+
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+static int i40e_link_test(struct net_device *netdev, u64 *data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       i40e_status status;
+       bool link_up = false;
+
+       netif_info(pf, hw, netdev, "link test\n");
+       status = i40e_get_link_status(&pf->hw, &link_up);
+       if (status != I40E_SUCCESS) {
+               netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
+               *data = 1;
+               return *data;
+       }
+
+       if (link_up)
+               *data = 0;
+       else
+               *data = 1;
+
+       return *data;
+}
+
+static int i40e_reg_test(struct net_device *netdev, u64 *data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+
+       netif_info(pf, hw, netdev, "register test\n");
+       *data = i40e_diag_reg_test(&pf->hw);
+
+       return *data;
+}
+
+static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+
+       netif_info(pf, hw, netdev, "eeprom test\n");
+       *data = i40e_diag_eeprom_test(&pf->hw);
+
+       /* forcibly clear the NVM Update state machine */
+       pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
+       return *data;
+}
+
+static int i40e_intr_test(struct net_device *netdev, u64 *data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       u16 swc_old = pf->sw_int_count;
+
+       netif_info(pf, hw, netdev, "interrupt test\n");
+       wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
+            (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+             I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+             I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
+             I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
+             I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
+       usleep_range(1000, 2000);
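+       /* if the software interrupt count did not advance, no interrupt
+        * fired and the test reports failure (non-zero)
+        */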
+       *data = (swc_old == pf->sw_int_count);
+
+       return *data;
+}
+
+static int i40e_loopback_test(struct net_device *netdev, u64 *data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+
+       netif_info(pf, hw, netdev, "loopback test not implemented\n");
+       *data = 0;
+
+       return *data;
+}
+
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+static int i40e_diag_test_count(struct net_device *netdev)
+{
+       return I40E_TEST_LEN;
+}
+
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+       struct i40e_vf *vfs = pf->vf;
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vfs; i++)
+               if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
+                       return true;
+       return false;
+}
+
+static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
+{
+       struct i40e_vsi **vsi = pf->vsi;
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               if (!vsi[i])
+                       continue;
+               if (vsi[i]->type == I40E_VSI_VMDQ2)
+                       return true;
+       }
+
+       return false;
+}
+
+static void i40e_diag_test(struct net_device *netdev,
+                          struct ethtool_test *eth_test, u64 *data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       bool if_running = netif_running(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+
+       if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+               /* Offline tests */
+               netif_info(pf, drv, netdev, "offline testing starting\n");
+
+               set_bit(__I40E_TESTING, &pf->state);
+
+               if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
+                       dev_warn(&pf->pdev->dev,
+                                "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
+                       data[I40E_ETH_TEST_REG]         = 1;
+                       data[I40E_ETH_TEST_EEPROM]      = 1;
+                       data[I40E_ETH_TEST_INTR]        = 1;
+                       data[I40E_ETH_TEST_LOOPBACK]    = 1;
+                       data[I40E_ETH_TEST_LINK]        = 1;
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+                       clear_bit(__I40E_TESTING, &pf->state);
+                       goto skip_ol_tests;
+               }
+
+               /* If the device is online then take it offline */
+               if (if_running)
+                       /* indicate we're in test mode */
+                       dev_close(netdev);
+               else
+                       /* This reset does not affect link - if it is
+                        * changed to a type of reset that does affect
+                        * link then the following link test would have
+                        * to be moved to before the reset
+                        */
+                       i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+
+               if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+
+               if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+
+               if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+
+               if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+
+               /* run reg test last, a reset is required after it */
+               if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+
+               clear_bit(__I40E_TESTING, &pf->state);
+               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
+
+               if (if_running)
+                       dev_open(netdev);
+       } else {
+               /* Online tests */
+               netif_info(pf, drv, netdev, "online testing starting\n");
+
+               if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+
+               /* Offline only tests, not run in online; pass by default */
+               data[I40E_ETH_TEST_REG] = 0;
+               data[I40E_ETH_TEST_EEPROM] = 0;
+               data[I40E_ETH_TEST_INTR] = 0;
+               data[I40E_ETH_TEST_LOOPBACK] = 0;
+       }
+
+skip_ol_tests:
+
+       netif_info(pf, drv, netdev, "testing finished\n");
+}
+
+static void i40e_get_wol(struct net_device *netdev,
+                        struct ethtool_wolinfo *wol)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u16 wol_nvm_bits;
+
+       /* NVM bit on means WoL disabled for the port */
+       i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+       if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
+               wol->supported = 0;
+               wol->wolopts = 0;
+       } else {
+               wol->supported = WAKE_MAGIC;
+               wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
+       }
+}
+
+/**
+ * i40e_set_wol - set the WakeOnLAN configuration
+ * @netdev: the netdev in question
+ * @wol: the ethtool WoL setting data
+ **/
+static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_hw *hw = &pf->hw;
+       u16 wol_nvm_bits;
+
+       /* WoL not supported if this isn't the controlling PF on the port */
+       if (hw->partition_id != 1) {
+               i40e_partition_setting_complaint(pf);
+               return -EOPNOTSUPP;
+       }
+
+       if (vsi != pf->vsi[pf->lan_vsi])
+               return -EOPNOTSUPP;
+
+       /* NVM bit on means WoL disabled for the port */
+       i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+       if (BIT(hw->port) & wol_nvm_bits)
+               return -EOPNOTSUPP;
+
+       /* only magic packet is supported */
+       if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+               return -EOPNOTSUPP;
+
+       /* is this a new value? */
+       if (pf->wol_en != !!wol->wolopts) {
+               pf->wol_en = !!wol->wolopts;
+               device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+       }
+
+       return 0;
+}
+
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+static int i40e_set_phys_id(struct net_device *netdev,
+                           enum ethtool_phys_id_state state)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       int blink_freq = 2;
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               pf->led_status = i40e_led_get(hw);
+               return blink_freq;
+       case ETHTOOL_ID_ON:
+               i40e_led_set(hw, 0xF, false);
+               break;
+       case ETHTOOL_ID_OFF:
+               i40e_led_set(hw, 0x0, false);
+               break;
+       case ETHTOOL_ID_INACTIVE:
+               i40e_led_set(hw, pf->led_status, false);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+#else /* HAVE_ETHTOOL_SET_PHYS_ID */
+static int i40e_phys_id(struct net_device *netdev, u32 data)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       int i;
+
+       pf->led_status = i40e_led_get(hw);
+
+       if (!data || data > 300)
+               data = 300;
+
+       for (i = 0; i < (data * 1000); i += 400) {
+               i40e_led_set(hw, 0xF, false);
+               msleep_interruptible(200);
+               i40e_led_set(hw, 0x0, false);
+               msleep_interruptible(200);
+       }
+
+       i40e_led_set(hw, pf->led_status, false);
+
+       return 0;
+}
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+
+/* NOTE: i40e hardware uses a conversion factor of 2 for the Interrupt
+ * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us; likewise
+ * 125us (8000 interrupts per second) == ITR(62)
+ */
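+/* Example: "ethtool -C <dev> rx-usecs 20" stores 20 in rx_itr_setting;
+ * per the factor-of-2 note above, that corresponds to an ITR register
+ * value of 10, which ITR_TO_REG() produces in i40e_set_coalesce() below.
+ */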
+
+static int i40e_get_coalesce(struct net_device *netdev,
+                            struct ethtool_coalesce *ec)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+
+       ec->tx_max_coalesced_frames_irq = vsi->work_limit;
+       ec->rx_max_coalesced_frames_irq = vsi->work_limit;
+
+       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+               ec->use_adaptive_rx_coalesce = 1;
+
+       if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+               ec->use_adaptive_tx_coalesce = 1;
+
+       ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+       ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+       /* we use the _usecs_high fields to store/set the interrupt rate
+        * limit that the hardware supports; this almost, but not quite,
+        * fits the original intent of the ethtool variable, as
+        * rx_coalesce_usecs_high limits the total interrupts per second
+        * from both Tx and Rx sources.
+        */
+       ec->rx_coalesce_usecs_high = vsi->int_rate_limit;
+       ec->tx_coalesce_usecs_high = vsi->int_rate_limit;
+
+       return 0;
+}
+
+static int i40e_set_coalesce(struct net_device *netdev,
+                            struct ethtool_coalesce *ec)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_q_vector *q_vector;
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u16 vector;
+       int i;
+
+       if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+               vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+       /* tx_coalesce_usecs_high is ignored, use rx-usecs-high to adjust limit */
+       if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) {
+               netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n");
+               return -EINVAL;
+       }
+
+       if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+               netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n");
+               return -EINVAL;
+       }
+
+       vector = vsi->base_vector;
+       if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+           (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
+               vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+       } else if (ec->rx_coalesce_usecs == 0) {
+               vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+               if (ec->use_adaptive_rx_coalesce)
+                       netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+       } else {
+               netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
+               return -EINVAL;
+       }
+
+       vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
+
+       if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+           (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
+               vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+       } else if (ec->tx_coalesce_usecs == 0) {
+               vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+               if (ec->use_adaptive_tx_coalesce)
+                       netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+       } else {
+               netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
+               return -EINVAL;
+       }
+
+       if (ec->use_adaptive_rx_coalesce)
+               vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+       else
+               vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+       if (ec->use_adaptive_tx_coalesce)
+               vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+       else
+               vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+
+               q_vector = vsi->q_vectors[i];
+               q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+               wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
+               q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+               wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
+               wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
+               i40e_flush(hw);
+       }
+
+       return 0;
+}
+
+#ifdef ETHTOOL_SRXNTUPLE
+/* We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid
+ * a NULL pointer dereference: those kernels assumed that if the
+ * NETIF_F_NTUPLE flag was defined, this function was present.
+ */
+static int i40e_set_rx_ntuple(struct net_device *dev,
+                             struct ethtool_rx_ntuple *cmd)
+{
+       return -EOPNOTSUPP;
+}
+
+#endif /* ETHTOOL_SRXNTUPLE */
+#ifdef ETHTOOL_GRXRINGS
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @pf: pointer to the physical function struct
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+{
+       cmd->data = 0;
+
+       if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) {
+               cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data;
+               cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type;
+               return 0;
+       }
+       /* Report default options for RSS on i40e */
+       switch (cmd->flow_type) {
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+               cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+       /* fall through to add IP fields */
+       case SCTP_V4_FLOW:
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case IPV4_FLOW:
+               cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+               break;
+       case TCP_V6_FLOW:
+       case UDP_V6_FLOW:
+               cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+       /* fall through to add IP fields */
+       case SCTP_V6_FLOW:
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case IPV6_FLOW:
+               cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
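+
+/* Illustrative query of the defaults reported above (standard ethtool
+ * flow-hash syntax, interface name made up):
+ *
+ *   ethtool -n eth0 rx-flow-hash tcp4
+ *
+ * should report hashing on IP src/dst and both L4 port halves, i.e.
+ * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3.
+ */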
+
+/**
+ * i40e_get_ethtool_fdir_all - Populates the rule count of a command
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ * @rule_locs: Array of used rule locations
+ *
+ * This function populates both the total and actual rule count of
+ * the ethtool flow classification command
+ *
+ * Returns 0 on success, or -EMSGSIZE if the rule_locs array is too small
+ **/
+static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
+                                    struct ethtool_rxnfc *cmd,
+                                    u32 *rule_locs)
+{
+       struct i40e_fdir_filter *rule;
+       struct hlist_node *node2;
+       int cnt = 0;
+
+       /* report total rule count */
+       cmd->data = i40e_get_fd_cnt_all(pf);
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               if (cnt == cmd->rule_cnt)
+                       return -EMSGSIZE;
+
+               rule_locs[cnt] = rule->fd_id;
+               cnt++;
+       }
+
+       cmd->rule_cnt = cnt;
+
+       return 0;
+}
+
+/**
+ * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * This function looks up a filter based on the Rx flow classification
+ * command and fills the flow spec info for it if found
+ *
+ * Returns 0 on success or -EINVAL if filter not found
+ **/
+static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
+                                      struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec *fsp =
+                       (struct ethtool_rx_flow_spec *)&cmd->fs;
+       struct i40e_fdir_filter *rule = NULL;
+       struct hlist_node *node2;
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               if (fsp->location <= rule->fd_id)
+                       break;
+       }
+
+       if (!rule || fsp->location != rule->fd_id)
+               return -EINVAL;
+
+       fsp->flow_type = rule->flow_type;
+       if (fsp->flow_type == IP_USER_FLOW) {
+               fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+               fsp->h_u.usr_ip4_spec.proto = 0;
+               fsp->m_u.usr_ip4_spec.proto = 0;
+       }
+
+       /* Reverse the src and dest notion, since the HW views them from
+        * the Tx perspective, whereas the user expects them from the Rx
+        * filter view.
+        */
+       fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
+       fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
+       fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
+       fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+
+       if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
+               fsp->ring_cookie = RX_CLS_FLOW_DISC;
+       else
+               fsp->ring_cookie = rule->q_index;
+
+       if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
+               struct i40e_vsi *vsi;
+
+               vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
+               if (vsi && vsi->type == I40E_VSI_SRIOV) {
+                       fsp->h_ext.data[1] = htonl(vsi->vf_id);
+                       fsp->m_ext.data[1] = htonl(0x1);
+               }
+       }
+
+       return 0;
+}
+
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+#define VXLAN_PORT     8472
+
+/**
+ * i40e_get_vxlan_filter_ethtool - get a VXLAN filter by loc
+ * @pf: pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * Get a VXLAN filter by location.
+ * Returns 0 on success.
+ **/
+static int i40e_get_vxlan_filter_ethtool(struct i40e_pf *pf,
+                                        struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec *fsp =
+                       (struct ethtool_rx_flow_spec *)&cmd->fs;
+       static const u8 mac_broadcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+       struct i40e_cloud_filter *rule, *pfilter = NULL;
+       struct i40e_vsi *dst_vsi;
+       struct hlist_node *node2;
+       __be32 tena;
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->cloud_filter_list, cloud_node) {
+               /* filter found with the id */
+               if (rule->id == fsp->location) {
+                       pfilter = rule;
+                       break;
+               }
+       }
+       if (!pfilter) {
+               dev_info(&pf->pdev->dev, "No cloud filter with loc %d\n",
+                       fsp->location);
+               return -ENOENT;
+       }
+
+       dst_vsi = i40e_find_vsi_from_id(pf, pfilter->vsi_id);
+       if (dst_vsi && dst_vsi->type == I40E_VSI_SRIOV) {
+               fsp->h_ext.data[1] = htonl(dst_vsi->vf_id);
+               fsp->m_ext.data[1] = htonl(0x1);
+       }
+
+       ether_addr_copy(fsp->h_u.ether_spec.h_dest, pfilter->outer_mac);
+       ether_addr_copy(fsp->h_u.ether_spec.h_source, pfilter->inner_mac);
+       fsp->h_u.usr_ip4_spec.ip4dst = pfilter->inner_ip[0];
+       fsp->h_ext.vlan_tci = pfilter->inner_vlan;
+
+       tena = htonl(pfilter->tenant_id);
+       memcpy(&fsp->h_ext.data[0], &tena, sizeof(tena));
+
+       fsp->ring_cookie = pfilter->queue_id;
+       if (pfilter->flags & I40E_CLOUD_FIELD_OMAC)
+               ether_addr_copy(fsp->m_u.ether_spec.h_dest, mac_broadcast);
+       if (pfilter->flags & I40E_CLOUD_FIELD_IMAC)
+               ether_addr_copy(fsp->m_u.ether_spec.h_source, mac_broadcast);
+       if (pfilter->flags & I40E_CLOUD_FIELD_IVLAN)
+               fsp->m_ext.vlan_tci = htons(0x7fff);
+       if (pfilter->flags & I40E_CLOUD_FIELD_TEN_ID)
+               *(__be32 *)&fsp->m_ext.data[0] = htonl(0x1);
+       if (pfilter->flags & I40E_CLOUD_FIELD_IIP) {
+               fsp->flow_type = IP_USER_FLOW;
+               fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+       } else {
+               fsp->flow_type = ETHER_FLOW;
+       }
+
+       fsp->flow_type |= FLOW_MAC_EXT;
+
+       return 0;
+}
+
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+/**
+ * i40e_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+                         void *rule_locs)
+#else
+                         u32 *rule_locs)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       int ret = -EOPNOTSUPP;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXRINGS:
+               cmd->data = vsi->alloc_queue_pairs;
+               ret = 0;
+               break;
+       case ETHTOOL_GRXFH:
+               ret = i40e_get_rss_hash_opts(pf, cmd);
+               break;
+       case ETHTOOL_GRXCLSRLCNT:
+               cmd->rule_cnt = pf->fdir_pf_active_filters;
+               /* report total rule count */
+               cmd->data = i40e_get_fd_cnt_all(pf);
+               ret = 0;
+               break;
+       case ETHTOOL_GRXCLSRULE:
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+               /* check the cloud filter list first; fall back to the
+                * Flow Director list if no cloud filter has this id
+                */
+               ret = i40e_get_vxlan_filter_ethtool(pf, cmd);
+               if (ret)
+                       ret = i40e_get_ethtool_fdir_entry(pf, cmd);
+#else
+               ret = i40e_get_ethtool_fdir_entry(pf, cmd);
+#endif
+               break;
+       case ETHTOOL_GRXCLSRLALL:
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+               ret = i40e_get_ethtool_fdir_all(pf, cmd, (u32 *)rule_locs);
+#else
+               ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
+#endif
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @pf: pointer to the physical function struct
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+                  ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+
+       /* RSS does not support anything other than hashing
+        * to queues on src and dst IPs and ports
+        */
+       if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+                         RXH_L4_B_0_1 | RXH_L4_B_2_3))
+               return -EINVAL;
+
+       /* We need at least the IP SRC and DEST fields for hashing */
+       if (!(nfc->data & RXH_IP_SRC) ||
+           !(nfc->data & RXH_IP_DST))
+               return -EINVAL;
+
+       switch (nfc->flow_type) {
+       case TCP_V4_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case TCP_V6_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case UDP_V4_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case UDP_V6_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case SCTP_V4_FLOW:
+               if ((nfc->data & RXH_L4_B_0_1) ||
+                   (nfc->data & RXH_L4_B_2_3))
+                       return -EINVAL;
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+               break;
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case SCTP_V6_FLOW:
+               if ((nfc->data & RXH_L4_B_0_1) ||
+                   (nfc->data & RXH_L4_B_2_3))
+                       return -EINVAL;
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+               break;
+       case IPV4_FLOW:
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+                       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+               break;
+       case IPV6_FLOW:
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+                       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+       wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+       i40e_flush(hw);
+
+       /* Save setting for future output/update */
+       pf->vsi[pf->lan_vsi]->rxnfc = *nfc;
+
+       return 0;
+}
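+
+/* Worked example of the HENA handling above: the 64-bit enable mask is
+ * written back as two 32-bit halves, so BIT_ULL(n) for a hypothetical
+ * PCTYPE n = 33 lands in bit 1 of I40E_PFQF_HENA(1), while PCTYPEs 0-31
+ * land in HENA(0). A request such as
+ *
+ *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
+ *
+ * ("sdfn" = src/dst IP plus both L4 port halves) takes the TCP_V4_FLOW
+ * path above and sets the IPV4_TCP bit.
+ */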
+
+/**
+ * i40e_match_fdir_input_set - Match a new filter against an existing one
+ * @rule: The filter already added
+ * @input: The new filter to compare against
+ *
+ * Returns true if the two input sets match
+ **/
+static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
+                                     struct i40e_fdir_filter *input)
+{
+       if ((rule->dst_ip[0] != input->dst_ip[0]) ||
+           (rule->src_ip[0] != input->src_ip[0]) ||
+           (rule->dst_port != input->dst_port) ||
+           (rule->src_port != input->src_port))
+               return false;
+       return true;
+}
+
+/**
+ * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @input: The filter to update or NULL to indicate deletion
+ * @sw_idx: Software index to the filter
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * This function updates (or deletes) a Flow Director entry from
+ * the hlist of the corresponding PF
+ *
+ * Returns 0 on success
+ **/
+static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
+                                         struct i40e_fdir_filter *input,
+                                         u16 sw_idx,
+                                         struct ethtool_rxnfc *cmd)
+{
+       struct i40e_fdir_filter *rule, *parent;
+       struct i40e_pf *pf = vsi->back;
+       struct hlist_node *node2;
+       int err = -EINVAL;
+
+       parent = NULL;
+       rule = NULL;
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               /* hash found, or no matching entry */
+               if (rule->fd_id >= sw_idx)
+                       break;
+               parent = rule;
+       }
+
+       /* if there is an old rule occupying our place remove it */
+       if (rule && (rule->fd_id == sw_idx)) {
+               if (input && !i40e_match_fdir_input_set(rule, input))
+                       err = i40e_add_del_fdir(vsi, rule, false);
+               else if (!input)
+                       err = i40e_add_del_fdir(vsi, rule, false);
+               hlist_del(&rule->fdir_node);
+               kfree(rule);
+               pf->fdir_pf_active_filters--;
+       }
+
+       /* If no input this was a delete, err should be 0 if a rule was
+        * successfully found and removed from the list else -EINVAL
+        */
+       if (!input)
+               return err;
+
+       /* initialize node and set software index */
+       INIT_HLIST_NODE(&input->fdir_node);
+
+       /* add filter to the list */
+       if (parent)
+               hlist_add_behind(&input->fdir_node, &parent->fdir_node);
+       else
+               hlist_add_head(&input->fdir_node,
+                              &pf->fdir_filter_list);
+
+       /* update counts */
+       pf->fdir_pf_active_filters++;
+
+       return 0;
+}
+
+/**
+ * i40e_del_fdir_entry - Deletes a Flow Director filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * The function removes a Flow Director filter entry from the
+ * hlist of the corresponding PF
+ *
+ * Returns 0 on success
+ **/
+static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
+                              struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec *fsp =
+               (struct ethtool_rx_flow_spec *)&cmd->fs;
+       struct i40e_pf *pf = vsi->back;
+       int ret = 0;
+
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+           test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+               return -EBUSY;
+
+       if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+               return -EBUSY;
+
+       ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
+
+       i40e_fdir_check_and_reenable(pf);
+       return ret;
+}
+
+/**
+ * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
+ * @vsi: pointer to the targeted VSI
+ * @cmd: command to get or set RX flow classification rules
+ *
+ * Add Flow Director filters for a specific flow spec based on their
+ * protocol.  Returns 0 if the filters were successfully added.
+ **/
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+                                struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec *fsp;
+       struct i40e_fdir_filter *input;
+       struct i40e_pf *pf;
+       int ret = -EINVAL;
+       u16 vf_id;
+
+       if (!vsi)
+               return -EINVAL;
+
+       pf = vsi->back;
+
+       if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+               return -EOPNOTSUPP;
+
+       if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+               return -ENOSPC;
+
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+           test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+               return -EBUSY;
+
+       if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+               return -EBUSY;
+
+       fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+       if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
+                             pf->hw.func_caps.fd_filters_guaranteed)) {
+               return -EINVAL;
+       }
+
+       if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+           (fsp->ring_cookie >= vsi->num_queue_pairs))
+               return -EINVAL;
+
+       input = kzalloc(sizeof(*input), GFP_KERNEL);
+
+       if (!input)
+               return -ENOMEM;
+
+       input->fd_id = fsp->location;
+
+       if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+               input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+       else
+               input->dest_ctl =
+                            I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+
+       input->q_index = fsp->ring_cookie;
+       input->flex_off = 0;
+       input->pctype = 0;
+       input->dest_vsi = vsi->id;
+       input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
+       input->cnt_index  = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
+       input->flow_type = fsp->flow_type;
+       input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
+
+       /* Reverse the src and dest notion, since the HW expects them from
+        * the Tx perspective, whereas the input from the user is from the
+        * Rx filter view.
+        */
+       input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
+       input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
+       input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+       input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+       if (ntohl(fsp->m_ext.data[1])) {
+               if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
+                       netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+                       goto free_input;
+               }
+               vf_id = ntohl(fsp->h_ext.data[1]);
+               /* Find vsi id from vf id and override dest vsi */
+               input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
+               if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
+                       netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+                       goto free_input;
+               }
+       }
+
+       ret = i40e_add_del_fdir(vsi, input, true);
+free_input:
+       if (ret)
+               kfree(input);
+       else
+               i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
+
+       return ret;
+}
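+
+/* Illustrative Flow Director insertion (standard ethtool -U syntax;
+ * addresses, queue and location are made up):
+ *
+ *   ethtool -U eth0 flow-type tcp4 src-ip 192.0.2.1 dst-ip 192.0.2.2 \
+ *           src-port 80 dst-port 8080 action 2 loc 5
+ *
+ * As noted above, the src/dst fields are swapped before programming
+ * because the hardware matches from the Tx perspective.
+ */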
+
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+/**
+ * i40e_vxlan_filter_mask2flags - Convert VXLAN filter details to a filter type
+ * @fsp: RX flow classification rules
+ * @flags: Resultant combination of all the fields to decide the tuple
+ *
+ * Returns 0 if a valid filter type was identified.
+ **/
+static inline i40e_status i40e_vxlan_filter_mask2flags(
+                                       struct ethtool_rx_flow_spec *fsp,
+                                       u8 *flags)
+{
+       static const u8 mac_broadcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+       static const u8 mac_zero[] = { 0, 0, 0, 0, 0, 0 };
+       u8 i = 0;
+       u16 vlan_tci = fsp->m_ext.vlan_tci;
+       u32 vxlan_id = 0;
+
+       *flags = 0;
+
+       if (ntohl(fsp->h_ext.data[0]) != 0xffffffff)
+               vxlan_id = ntohl(fsp->m_ext.data[0]);
+
+       switch (fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+       case ETHER_FLOW:
+               if (!memcmp(fsp->m_u.ether_spec.h_dest, mac_broadcast,
+                   sizeof(mac_broadcast)))
+                       i |= I40E_CLOUD_FIELD_OMAC;
+               else if (!memcmp(fsp->m_u.ether_spec.h_dest, mac_zero,
+                   sizeof(mac_zero)))
+                       i &= ~I40E_CLOUD_FIELD_OMAC;
+               else
+                       return I40E_ERR_CONFIG;
+
+               if (!memcmp(fsp->m_u.ether_spec.h_source, mac_broadcast,
+                   sizeof(mac_broadcast)))
+                       i |= I40E_CLOUD_FIELD_IMAC;
+               else if (!memcmp(fsp->m_u.ether_spec.h_source, mac_zero,
+                   sizeof(mac_zero)))
+                       i &= ~I40E_CLOUD_FIELD_IMAC;
+               else
+                       return I40E_ERR_CONFIG;
+               break;
+
+       case IP_USER_FLOW:
+               if (fsp->m_u.usr_ip4_spec.ip4dst == 0xffffffff)
+                       i |= I40E_CLOUD_FIELD_IIP;
+               else if (fsp->m_u.usr_ip4_spec.ip4dst == 0)
+                       i &= ~I40E_CLOUD_FIELD_IIP;
+               else
+                       return I40E_ERR_CONFIG;
+               break;
+       default:
+               return I40E_ERR_CONFIG;
+       }
+
+       switch (vlan_tci & 0x7fff) {
+       case 0x7fff:
+               i |= I40E_CLOUD_FIELD_IVLAN;
+               break;
+       case 0:
+               i &= ~I40E_CLOUD_FIELD_IVLAN;
+               break;
+       default:
+               return I40E_ERR_CONFIG;
+       }
+
+       switch (vxlan_id & 0xffffff) {
+       case 0xffffff:
+               i |= I40E_CLOUD_FIELD_TEN_ID;
+               break;
+       case 0:
+               i &= ~I40E_CLOUD_FIELD_TEN_ID;
+               break;
+       default:
+               return I40E_ERR_CONFIG;
+       }
+
+       *flags = i;
+       return I40E_SUCCESS;
+}
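+
+/* Worked example of the mask-to-flags conversion above: an ETHER_FLOW spec
+ * with dst MAC mask ff:ff:ff:ff:ff:ff, an all-zero src MAC mask, VLAN TCI
+ * mask 0x7fff and a 24-bit tenant-id mask of 0xffffff yields
+ * I40E_CLOUD_FIELD_OMAC | I40E_CLOUD_FIELD_IVLAN | I40E_CLOUD_FIELD_TEN_ID;
+ * any partial mask makes the function return I40E_ERR_CONFIG.
+ */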
+
+/**
+ * i40e_add_vxlan_filter_ethtool - Add a VXLAN filter
+ * @pf: pointer to the physical function struct
+ * @fsp: RX flow classification rules
+ *
+ * Add a VXLAN filter for a specific flow spec.
+ * Returns 0 if the filter was successfully added.
+ **/
+static int i40e_add_vxlan_filter_ethtool(struct i40e_pf *pf,
+                                        struct ethtool_rx_flow_spec *fsp)
+{
+       struct i40e_vsi *dst_vsi, *vsi = NULL;
+       struct i40e_cloud_filter *rule, *parent, *pfilter = NULL;
+       struct hlist_node *node2;
+       u16 vf_id, vsi_idx;
+       u8 flags = 0;
+       int ret;
+
+       if (ntohl(fsp->m_ext.data[1])) {
+               vf_id = (u16)ntohl(fsp->h_ext.data[1]);
+               /* if VF id >= num_vfs, program a filter for the PF main VSI */
+               if (vf_id >= pf->num_alloc_vfs) {
+                       dev_info(&pf->pdev->dev,
+                                "Out-of-range vf_id %d, adding the cloud filter to the main VSI\n",
+                                vf_id);
+                       dst_vsi = pf->vsi[pf->lan_vsi];
+               } else {
+                       vsi_idx = pf->vf[vf_id].lan_vsi_idx;
+                       dst_vsi = pf->vsi[vsi_idx];
+                       if (!dst_vsi) {
+                               dev_info(&pf->pdev->dev,
+                                        "Invalid vf_id %d\n", vf_id);
+                               return -EINVAL;
+                       }
+               }
+       } else {
+               dst_vsi = pf->vsi[pf->lan_vsi];
+       }
+
+       if (fsp->ring_cookie >= dst_vsi->num_queue_pairs) {
+               dev_info(&pf->pdev->dev,
+                        "Invalid queue_id %llu\n", fsp->ring_cookie);
+               return -EINVAL;
+       }
+
+       ret = i40e_vxlan_filter_mask2flags(fsp, &flags);
+       if (ret || !flags) {
+               dev_info(&pf->pdev->dev,
+                        "Invalid mask config, ret = %d, flags = %d\n",
+                        ret, flags);
+               return -EINVAL;
+       }
+
+       parent = NULL;
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->cloud_filter_list, cloud_node) {
+               /* filter exists with the id */
+               if (rule->id >= fsp->location) {
+                       pfilter = rule;
+                       break;
+               }
+               parent = rule;
+       }
+       /* if filter exists with same id, delete old */
+       if (pfilter && (pfilter->id == fsp->location)) {
+               vsi = i40e_find_vsi_from_id(pf, pfilter->vsi_id);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev, "no vsi with vsi_id %d\n",
+                               pfilter->vsi_id);
+                       return -ENOSYS;
+               }
+               ret = i40e_add_del_cloud_filter(pf, pfilter, vsi, false);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "failed to delete old cloud filter, err = %d\n",
+                                ret);
+                       return -ENOSYS;
+               }
+               hlist_del(&pfilter->cloud_node);
+               kfree(pfilter);
+               pf->num_cloud_filters--;
+       }
+
+       pfilter = kzalloc(sizeof(*pfilter), GFP_KERNEL);
+
+       if (!pfilter)
+               return -ENOMEM;
+
+       pfilter->id = fsp->location;
+       pfilter->vsi_id = dst_vsi->id;
+       switch (fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+       case ETHER_FLOW:
+               ether_addr_copy(pfilter->outer_mac,
+                       fsp->h_u.ether_spec.h_dest);
+               ether_addr_copy(pfilter->inner_mac,
+                       fsp->h_u.ether_spec.h_source);
+               break;
+       case IP_USER_FLOW:
+               pfilter->inner_ip[0] = fsp->h_u.usr_ip4_spec.ip4dst;
+               break;
+       default:
+               dev_info(&pf->pdev->dev, "unknown flow type\n");
+               kfree(pfilter);
+               return I40E_ERR_CONFIG;
+       }
+
+       pfilter->inner_vlan = fsp->h_ext.vlan_tci;
+
+       if (ntohl(fsp->h_ext.data[0]) != 0xffffffff)
+               pfilter->tenant_id = ntohl(fsp->h_ext.data[0]);
+       /* else this is an L3 VEB filter for non-tunneled packets, or a
+        * tuple without a VNI.
+        */
+       pfilter->queue_id = fsp->ring_cookie;
+       pfilter->tunnel_type = I40E_CLOUD_TNL_TYPE_XVLAN;
+       pfilter->flags = flags;
+
+       ret = i40e_add_del_cloud_filter(pf, pfilter, dst_vsi, true);
+       if (ret) {
+               kfree(pfilter);
+               dev_info(&pf->pdev->dev,
+                        "failed to add cloud filter, err = %d\n", ret);
+               return -ENOSYS;
+       }
+
+       INIT_HLIST_NODE(&pfilter->cloud_node);
+       /* add filter to the list */
+
+       if (parent)
+               hlist_add_behind(&pfilter->cloud_node, &parent->cloud_node);
+       else
+               hlist_add_head(&pfilter->cloud_node,
+                                      &pf->cloud_filter_list);
+       pf->num_cloud_filters++;
+
+       return 0;
+}
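+
+/* Sketch of how a request reaches this path: user space passes the tenant
+ * id (VNI) and an optional VF id through ethtool's user-def field, which
+ * (assuming the usual ethtool layout) is split across fsp->h_ext.data[0]
+ * and data[1] as consumed above; a nonzero data[0] routes the request here
+ * rather than to the Flow Director path (see i40e_set_rxnfc() below).
+ */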
+
+/**
+ * i40e_del_vxlan_filter_ethtool - delete a VXLAN filter
+ * @pf: pointer to the physical function struct
+ * @fsp: RX flow classification rules
+ *
+ * Delete the VXLAN filter for a specific flow spec.
+ * Returns 0 if the filter was successfully deleted.
+ **/
+static int i40e_del_vxlan_filter_ethtool(struct i40e_pf *pf,
+                                        struct ethtool_rx_flow_spec *fsp)
+{
+       struct i40e_cloud_filter *rule, *pfilter = NULL;
+       struct i40e_vsi *vsi = NULL;
+       struct hlist_node *node2;
+       int ret;
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->cloud_filter_list, cloud_node) {
+               /* filter found with the id */
+               if (rule->id == fsp->location) {
+                       pfilter = rule;
+                       break;
+               }
+       }
+       if (!pfilter) {
+               dev_info(&pf->pdev->dev, "no cloud filter exists with id %d\n",
+                       fsp->location);
+               return -ENOENT;
+       }
+       vsi = i40e_find_vsi_from_id(pf, pfilter->vsi_id);
+       if (!vsi) {
+               dev_info(&pf->pdev->dev,
+                        "no vsi with vsi_id %d\n", pfilter->vsi_id);
+               return -ENOSYS;
+       }
+
+       ret = i40e_add_del_cloud_filter(pf, pfilter, vsi, false);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "failed to delete cloud filter, err = %d\n",
+                        ret);
+               return -ENOSYS;
+       }
+
+       /* remove filter from the list */
+       hlist_del(&pfilter->cloud_node);
+       kfree(pfilter);
+       pf->num_cloud_filters--;
+
+       return 0;
+}
+
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+
+/**
+ * i40e_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+       struct ethtool_rx_flow_spec *fsp;
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       int ret = -EOPNOTSUPP;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_SRXFH:
+               ret = i40e_set_rss_hash_opt(pf, cmd);
+               break;
+       case ETHTOOL_SRXCLSRLINS:
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+               fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+#define I40E_USER_DATA_VXLAN_CLOUD_FILTER 1
+               if (ntohl(fsp->h_ext.data[0]) >=
+                                       I40E_USER_DATA_VXLAN_CLOUD_FILTER)
+                       ret = i40e_add_vxlan_filter_ethtool(pf, fsp);
+               else
+                       ret = i40e_add_fdir_ethtool(vsi, cmd);
+#else
+               ret = i40e_add_fdir_ethtool(vsi, cmd);
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+               break;
+       case ETHTOOL_SRXCLSRLDEL:
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+               fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+               if (ntohl(fsp->h_ext.data[0]) >=
+                                       I40E_USER_DATA_VXLAN_CLOUD_FILTER)
+                       ret = i40e_del_vxlan_filter_ethtool(pf, fsp);
+               else
+                       ret = i40e_del_fdir_entry(vsi, cmd);
+#else
+               ret = i40e_del_fdir_entry(vsi, cmd);
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+#endif /* ETHTOOL_GRXRINGS */
+#ifdef ETHTOOL_SCHANNELS
+/**
+ * i40e_max_channels - get the maximum number of combined channels supported
+ * @vsi: vsi pointer
+ **/
+static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
+{
+       /* TODO: This code assumes DCB and FD are disabled for now. */
+       return vsi->alloc_queue_pairs;
+}
+
+/**
+ * i40e_get_channels - Get the current channels enabled and max supported etc.
+ * @dev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void i40e_get_channels(struct net_device *dev,
+                              struct ethtool_channels *ch)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+
+       /* report maximum channels */
+       ch->max_combined = i40e_max_channels(vsi);
+
+       /* report info for other vector */
+       ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
+       ch->max_other = ch->other_count;
+
+       /* Note: This code assumes DCB is disabled for now. */
+       ch->combined_count = vsi->num_queue_pairs;
+}
+
+/**
+ * i40e_set_channels - Set the new channels count.
+ * @dev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * The new channels count may not be the same as requested by the user
+ * since it gets rounded down to a power of 2 value.
+ **/
+static int i40e_set_channels(struct net_device *dev,
+                             struct ethtool_channels *ch)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       unsigned int count = ch->combined_count;
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       int new_count;
+
+       /* We do not support setting channels for any other VSI at present */
+       if (vsi->type != I40E_VSI_MAIN)
+               return -EINVAL;
+
+       /* verify they are not requesting separate vectors */
+       if (!count || ch->rx_count || ch->tx_count)
+               return -EINVAL;
+
+       /* verify other_count has not changed */
+       if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
+               return -EINVAL;
+
+       /* verify the number of channels does not exceed hardware limits */
+       if (count > i40e_max_channels(vsi))
+               return -EINVAL;
+
+       /* update feature limits from largest to smallest supported values */
+       /* TODO: Flow director limit, DCB etc */
+
+       /* use rss_reconfig to rebuild with new queue count and update traffic
+        * class queue mapping
+        */
+       new_count = i40e_reconfig_rss_queues(pf, count);
+       if (new_count > 0)
+               return 0;
+       else
+               return -EINVAL;
+}
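+
+/* Example (standard ethtool -L syntax, interface name made up):
+ *
+ *   ethtool -L eth0 combined 8
+ *
+ * Separate rx/tx counts are rejected above; the count actually applied is
+ * whatever i40e_reconfig_rss_queues() settles on.
+ */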
+
+#endif /* ETHTOOL_SCHANNELS */
+#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
+/**
+ * i40e_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the hash key size.
+ **/
+static u32 i40e_get_rxfh_key_size(struct net_device *netdev)
+{
+       return I40E_HKEY_ARRAY_SIZE;
+}
+#endif /* ETHTOOL_GRSSH */
+
+#ifdef ETHTOOL_GRXFHINDIR
+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
+/**
+ * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
+{
+       return I40E_HLUT_ARRAY_SIZE;
+}
+
+#if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
+#ifdef HAVE_RXFH_HASHFUNC
+static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+                        u8 *hfunc)
+#else
+/**
+ * i40e_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+#endif
+#else
+/**
+ * i40e_get_rxfh_indir - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40e_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+#endif /* ETHTOOL_GRSSH */
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_val;
+       int i, j;
+
+#ifdef HAVE_RXFH_HASHFUNC
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+#endif
+       if (!indir)
+               return 0;
+
+       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               reg_val = rd32(hw, I40E_PFQF_HLUT(i));
+               indir[j++] = reg_val & 0xff;
+               indir[j++] = (reg_val >> 8) & 0xff;
+               indir[j++] = (reg_val >> 16) & 0xff;
+               indir[j++] = (reg_val >> 24) & 0xff;
+       }
+
+#if defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
+       if (key) {
+               for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+                       reg_val = rd32(hw, I40E_PFQF_HKEY(i));
+                       key[j++] = (u8)(reg_val & 0xff);
+                       key[j++] = (u8)((reg_val >> 8) & 0xff);
+                       key[j++] = (u8)((reg_val >> 16) & 0xff);
+                       key[j++] = (u8)((reg_val >> 24) & 0xff);
+               }
+       }
+#endif
+       return 0;
+}
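+
+/* Worked example of the HLUT packing above: each 32-bit LUT register holds
+ * four one-byte queue indexes, least-significant byte first, so table
+ * entries {0, 1, 2, 3} read back from I40E_PFQF_HLUT(0) as 0x03020100.
+ */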
+#else
+/**
+ * i40e_get_rxfh_indir - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Reads the indirection table directly from the hardware. Returns 0 or -EINVAL
+ * if the supplied table isn't large enough.
+ **/
+static int i40e_get_rxfh_indir(struct net_device *netdev,
+                              struct ethtool_rxfh_indir *indir)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_val;
+       int i, j;
+
+       if (indir->size < I40E_HLUT_ARRAY_SIZE)
+               return -EINVAL;
+
+       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               reg_val = rd32(hw, I40E_PFQF_HLUT(i));
+               indir->ring_index[j++] = reg_val & 0xff;
+               indir->ring_index[j++] = (reg_val >> 8) & 0xff;
+               indir->ring_index[j++] = (reg_val >> 16) & 0xff;
+               indir->ring_index[j++] = (reg_val >> 24) & 0xff;
+       }
+       indir->size = I40E_HLUT_ARRAY_SIZE;
+       return 0;
+}
+
+#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
+#endif /* ETHTOOL_GRXFHINDIR */
+#ifdef ETHTOOL_SRXFHINDIR
+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#if defined(ETHTOOL_SRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
+/**
+ * i40e_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+#ifdef HAVE_RXFH_HASHFUNC
+static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
+                        const u8 *key, const u8 hfunc)
+#else
+static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
+                        const u8 *key)
+#endif
+#else
+/**
+ * i40e_set_rxfh_indir - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40e_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+#endif /* ETHTOOL_SRSSH */
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_val;
+       int i, j;
+
+#ifdef HAVE_RXFH_HASHFUNC
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+#endif
+       if (!indir)
+               return 0;
+
+       /* Verify user input. */
+       for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) {
+               if (indir[i] >= pf->rss_size)
+                       return -EINVAL;
+       }
+
+       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               reg_val = indir[j++];
+               reg_val |= indir[j++] << 8;
+               reg_val |= indir[j++] << 16;
+               reg_val |= indir[j++] << 24;
+               wr32(hw, I40E_PFQF_HLUT(i), reg_val);
+       }
+#if defined(ETHTOOL_SRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
+       if (key) {
+               for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+                       reg_val = key[j++];
+                       reg_val |= key[j++] << 8;
+                       reg_val |= key[j++] << 16;
+                       reg_val |= key[j++] << 24;
+                       wr32(hw, I40E_PFQF_HKEY(i), reg_val);
+               }
+       }
+#endif
+       return 0;
+}
+#else
+/**
+ * i40e_set_rxfh_indir - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40e_set_rxfh_indir(struct net_device *netdev,
+                                const struct ethtool_rxfh_indir *indir)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_val;
+       int i, j;
+
+       if (indir->size < I40E_HLUT_ARRAY_SIZE)
+               return -EINVAL;
+
+       /* Verify user input. */
+       for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) {
+               if (indir->ring_index[i] >= pf->rss_size)
+                       return -EINVAL;
+       }
+
+       for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               reg_val = indir->ring_index[j++];
+               reg_val |= indir->ring_index[j++] << 8;
+               reg_val |= indir->ring_index[j++] << 16;
+               reg_val |= indir->ring_index[j++] << 24;
+               wr32(hw, I40E_PFQF_HLUT(i), reg_val);
+       }
+
+       return 0;
+}
+#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
+#endif /* ETHTOOL_SRXFHINDIR */
+
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+/**
+ * i40e_get_priv_flags - report device private flags
+ * @dev: network interface device structure
+ *
+ * The get-string-set count and the string set itself must stay in sync for
+ * each flag returned; add a new string to the i40e_priv_flags_strings array
+ * for every flag.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40e_get_priv_flags(struct net_device *dev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       u32 ret_flags = 0;
+
+       ret_flags |= pf->flags & I40E_FLAG_MFP_ENABLED ?
+               I40E_PRIV_FLAGS_MFP_FLAG : 0;
+       ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
+               I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
+       ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
+               I40E_PRIV_FLAGS_FD_ATR : 0;
+       ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
+               I40E_PRIV_FLAGS_VEB_STATS : 0;
+
+       return ret_flags;
+}
+
+/**
+ * i40e_set_priv_flags - set private flags
+ * @dev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+
+       if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
+               pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
+       else
+               pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
+
+       /* allow the user to control the state of the Flow
+        * Director ATR (Application Targeted Routing) feature
+        * of the driver
+        */
+       if (flags & I40E_PRIV_FLAGS_FD_ATR) {
+               pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+       } else {
+               pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+               pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+       }
+
+       if (flags & I40E_PRIV_FLAGS_VEB_STATS)
+               pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
+       else
+               pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
+       return 0;
+}
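+
+/* Example (standard ethtool private-flags syntax; the flag names themselves
+ * come from the i40e_priv_flags_strings array declared elsewhere in this
+ * patch, so <flag-name> is a placeholder):
+ *
+ *   ethtool --show-priv-flags eth0
+ *   ethtool --set-priv-flags eth0 <flag-name> off
+ *
+ * Clearing the ATR flag also records it in auto_disable_flags, as above.
+ */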
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+
+static const struct ethtool_ops i40e_ethtool_ops = {
+       .get_settings           = i40e_get_settings,
+       .set_settings           = i40e_set_settings,
+       .get_drvinfo            = i40e_get_drvinfo,
+       .get_regs_len           = i40e_get_regs_len,
+       .get_regs               = i40e_get_regs,
+       .nway_reset             = i40e_nway_reset,
+       .get_link               = ethtool_op_get_link,
+       .get_wol                = i40e_get_wol,
+       .set_wol                = i40e_set_wol,
+       .set_eeprom             = i40e_set_eeprom,
+       .get_eeprom_len         = i40e_get_eeprom_len,
+       .get_eeprom             = i40e_get_eeprom,
+       .get_ringparam          = i40e_get_ringparam,
+       .set_ringparam          = i40e_set_ringparam,
+       .get_pauseparam         = i40e_get_pauseparam,
+       .set_pauseparam         = i40e_set_pauseparam,
+       .get_msglevel           = i40e_get_msglevel,
+       .set_msglevel           = i40e_set_msglevel,
+#ifndef HAVE_NDO_SET_FEATURES
+       .get_rx_csum            = i40e_get_rx_csum,
+       .set_rx_csum            = i40e_set_rx_csum,
+       .get_tx_csum            = i40e_get_tx_csum,
+       .set_tx_csum            = i40e_set_tx_csum,
+       .get_sg                 = ethtool_op_get_sg,
+       .set_sg                 = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+       .get_tso                = ethtool_op_get_tso,
+       .set_tso                = i40e_set_tso,
+#endif
+#ifdef ETHTOOL_GFLAGS
+       .get_flags              = ethtool_op_get_flags,
+       .set_flags              = i40e_set_flags,
+#endif
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GRXRINGS
+       .get_rxnfc              = i40e_get_rxnfc,
+       .set_rxnfc              = i40e_set_rxnfc,
+#ifdef ETHTOOL_SRXNTUPLE
+       .set_rx_ntuple          = i40e_set_rx_ntuple,
+#endif
+#endif
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+       .self_test_count        = i40e_diag_test_count,
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+       .self_test              = i40e_diag_test,
+       .get_strings            = i40e_get_strings,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+       .set_phys_id            = i40e_set_phys_id,
+#else
+       .phys_id                = i40e_phys_id,
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifndef HAVE_ETHTOOL_GET_SSET_COUNT
+       .get_stats_count        = i40e_get_stats_count,
+#else /* HAVE_ETHTOOL_GET_SSET_COUNT */
+       .get_sset_count         = i40e_get_sset_count,
+       .get_priv_flags         = i40e_get_priv_flags,
+       .set_priv_flags         = i40e_set_priv_flags,
+#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */
+       .get_ethtool_stats      = i40e_get_ethtool_stats,
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
+       .get_perm_addr          = ethtool_op_get_perm_addr,
+#endif
+       .get_coalesce           = i40e_get_coalesce,
+       .set_coalesce           = i40e_set_coalesce,
+#if defined(ETHTOOL_SRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)
+       .get_rxfh_key_size      = i40e_get_rxfh_key_size,
+#endif
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef ETHTOOL_GRXFHINDIR
+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
+       .get_rxfh_indir_size    = i40e_get_rxfh_indir_size,
+#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
+#ifdef ETHTOOL_GRSSH
+       .get_rxfh               = i40e_get_rxfh,
+#else
+       .get_rxfh_indir         = i40e_get_rxfh_indir,
+#endif /* ETHTOOL_GRSSH */
+#endif /* ETHTOOL_GRXFHINDIR */
+#ifdef ETHTOOL_SRXFHINDIR
+#ifdef ETHTOOL_SRSSH
+       .set_rxfh               = i40e_set_rxfh,
+#else
+       .set_rxfh_indir         = i40e_set_rxfh_indir,
+#endif /* ETHTOOL_SRSSH */
+#endif /* ETHTOOL_SRXFHINDIR */
+#ifdef ETHTOOL_SCHANNELS
+       .get_channels           = i40e_get_channels,
+       .set_channels           = i40e_set_channels,
+#endif
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+       .get_ts_info            = i40e_get_ts_info,
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+};
+
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+static const struct ethtool_ops_ext i40e_ethtool_ops_ext = {
+       .size                   = sizeof(struct ethtool_ops_ext),
+       .get_ts_info            = i40e_get_ts_info,
+       .set_phys_id            = i40e_set_phys_id,
+       .get_channels           = i40e_get_channels,
+       .set_channels           = i40e_set_channels,
+       .get_rxfh_indir_size    = i40e_get_rxfh_indir_size,
+       .get_rxfh_indir         = i40e_get_rxfh_indir,
+       .set_rxfh_indir         = i40e_set_rxfh_indir,
+};
+
+void i40e_set_ethtool_ops(struct net_device *netdev)
+{
+       netdev->ethtool_ops = &i40e_ethtool_ops;
+       set_ethtool_ops_ext(netdev, &i40e_ethtool_ops_ext);
+}
+#else
+void i40e_set_ethtool_ops(struct net_device *netdev)
+{
+       netdev->ethtool_ops = &i40e_ethtool_ops;
+}
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#endif /* SIOCETHTOOL */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_fcoe.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_fcoe.c
new file mode 100644 (file)
index 0000000..61dcba6
--- /dev/null
@@ -0,0 +1,1648 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include <linux/if_ether.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+
+#include "i40e.h"
+#include "i40e_fcoe.h"
+
+/**
+ * i40e_rx_is_fip - returns true if the rx packet type is FIP
+ * @ptype: the packet type field from rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fip(u16 ptype)
+{
+       return ptype == I40E_RX_PTYPE_L2_FIP_PAY2;
+}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
+ * @ptype: the packet type field from rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+       return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+              (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
+
+/**
+ * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
+ * @sof: the FCoE start of frame delimiter
+ **/
+static inline bool i40e_fcoe_sof_is_class2(u8 sof)
+{
+       return (sof == FC_SOF_I2) || (sof == FC_SOF_N2);
+}
+
+/**
+ * i40e_fcoe_sof_is_class3 - returns true if this is a FC Class 3 SOF
+ * @sof: the FCoE start of frame delimiter
+ **/
+static inline bool i40e_fcoe_sof_is_class3(u8 sof)
+{
+       return (sof == FC_SOF_I3) || (sof == FC_SOF_N3);
+}
+
+/**
+ * i40e_fcoe_sof_is_supported - returns true if the FC SOF is supported by HW
+ * @sof: the input SOF value from the frame
+ **/
+static inline bool i40e_fcoe_sof_is_supported(u8 sof)
+{
+       return i40e_fcoe_sof_is_class2(sof) ||
+              i40e_fcoe_sof_is_class3(sof);
+}
+
+/**
+ * i40e_fcoe_fc_sof - pull the SOF from the FCoE header in the frame
+ * @skb: the frame whose SOF is to be pulled
+ * @sof: where the pulled SOF is returned
+ **/
+static inline int i40e_fcoe_fc_sof(struct sk_buff *skb, u8 *sof)
+{
+       *sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
+
+       if (!i40e_fcoe_sof_is_supported(*sof))
+               return -EINVAL;
+       return 0;
+}
+
+/**
+ * i40e_fcoe_eof_is_supported - returns true if the EOF is supported by HW
+ * @eof:     the input EOF value from the frame
+ **/
+static inline bool i40e_fcoe_eof_is_supported(u8 eof)
+{
+       return (eof == FC_EOF_N) || (eof == FC_EOF_T) ||
+              (eof == FC_EOF_NI) || (eof == FC_EOF_A);
+}
+
+/**
+ * i40e_fcoe_fc_eof - pull the EOF from the FCoE trailer in the frame
+ * @skb: the frame whose EOF is to be pulled
+ * @eof: where the pulled EOF is returned
+ **/
+static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
+{
+       /* the first byte of the last dword is EOF */
+       skb_copy_bits(skb, skb->len - 4, eof, 1);
+
+       if (!i40e_fcoe_eof_is_supported(*eof))
+               return -EINVAL;
+       return 0;
+}
+
+/**
+ * i40e_fcoe_ctxt_eof - convert input FC EOF for descriptor programming
+ * @eof: the input eof value from the frame
+ *
+ * The FC EOF is converted to the value understood by HW for descriptor
+ * programming. Never call this without first calling
+ * i40e_fcoe_eof_is_supported(), which checks all supported EOF values.
+ **/
+static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
+{
+       switch (eof) {
+       case FC_EOF_N:
+               return I40E_TX_DESC_CMD_L4T_EOFT_EOF_N;
+       case FC_EOF_T:
+               return I40E_TX_DESC_CMD_L4T_EOFT_EOF_T;
+       case FC_EOF_NI:
+               return I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI;
+       case FC_EOF_A:
+               return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
+       default:
+               /* Supported valid eof shall be already checked by
+                * calling i40e_fcoe_eof_is_supported() first,
+                * therefore this default case shall never hit.
+                */
+               WARN_ON(1);
+               return -EINVAL;
+       }
+}
+
+/**
+ * i40e_fcoe_xid_is_valid - returns true if the exchange id is valid
+ * @xid: the exchange id
+ **/
+static inline bool i40e_fcoe_xid_is_valid(u16 xid)
+{
+       return (xid != FC_XID_UNKNOWN) && (xid < I40E_FCOE_DDP_MAX);
+}
+
+/**
+ * i40e_fcoe_ddp_unmap - unmap the sglist associated with a DDP context
+ * @pf: pointer to PF
+ * @ddp: sw DDP context
+ *
+ * Unmap the scatter-gather list associated with the given SW DDP context
+ * and free its dma_pool entry, if any.
+ **/
+static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf,
+                                      struct i40e_fcoe_ddp *ddp)
+{
+       if (test_and_set_bit(__I40E_FCOE_DDP_UNMAPPED, &ddp->flags))
+               return;
+
+       if (ddp->sgl) {
+               dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc,
+                            DMA_FROM_DEVICE);
+               ddp->sgl = NULL;
+               ddp->sgc = 0;
+       }
+
+       if (ddp->pool) {
+               dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
+               ddp->pool = NULL;
+       }
+}
+
+/**
+ * i40e_fcoe_ddp_clear - clear the given SW DDP context
+ * @ddp: SW DDP context
+ **/
+static inline void i40e_fcoe_ddp_clear(struct i40e_fcoe_ddp *ddp)
+{
+       memset(ddp, 0, sizeof(struct i40e_fcoe_ddp));
+       ddp->xid = FC_XID_UNKNOWN;
+       ddp->flags = __I40E_FCOE_DDP_NONE;
+}
+
+/**
+ * i40e_fcoe_progid_is_fcoe - check if the prog_id is for FCoE
+ * @id: the prog id for the programming status Rx descriptor write-back
+ **/
+static inline bool i40e_fcoe_progid_is_fcoe(u8 id)
+{
+       return (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
+              (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS);
+}
+
+/**
+ * i40e_fcoe_fc_get_xid - get the exchange id from the frame header
+ * @fh: the fc frame header
+ *
+ * If F_CTL has the EX_CTX bit set, the frame came from the exchange
+ * responder, so the local side originated the exchange and ox_id
+ * identifies it; otherwise rx_id does.
+ *
+ * Returns ox_id if exchange originator, rx_id if responder
+ **/
+static inline u16 i40e_fcoe_fc_get_xid(struct fc_frame_header *fh)
+{
+       u32 f_ctl = ntoh24(fh->fh_f_ctl);
+
+       return (f_ctl & FC_FC_EX_CTX) ?
+               be16_to_cpu(fh->fh_ox_id) :
+               be16_to_cpu(fh->fh_rx_id);
+}
+
+/**
+ * i40e_fcoe_fc_frame_header - get fc frame header from skb
+ * @skb: packet
+ *
+ * This checks if there is a VLAN header and returns the data
+ * pointer to the start of the fc_frame_header.
+ *
+ * Returns pointer to the fc_frame_header
+ **/
+static inline struct fc_frame_header *i40e_fcoe_fc_frame_header(
+       struct sk_buff *skb)
+{
+       void *fh = skb->data + sizeof(struct fcoe_hdr);
+
+       if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
+               fh += sizeof(struct vlan_hdr);
+
+       return (struct fc_frame_header *)fh;
+}
+
+/**
+ * i40e_fcoe_ddp_put - release the DDP context for a given exchange id
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id whose corresponding DDP context will be released
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
+ * and it is expected to be called by ULD, i.e., FCP layer of libfc
+ * to release the corresponding ddp context when the I/O is done.
+ *
+ * Returns : data length already ddp-ed in bytes
+ **/
+static int i40e_fcoe_ddp_put(struct net_device *netdev, u16 xid)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       struct i40e_fcoe_ddp *ddp;
+       int len = 0;
+
+       /* fcoe and its ddp array are embedded in the PF and can never be
+        * NULL, so bound the xid before indexing instead
+        */
+       if (xid >= I40E_FCOE_DDP_MAX)
+               goto out;
+
+       ddp = &fcoe->ddp[xid];
+       if (test_bit(__I40E_FCOE_DDP_DONE, &ddp->flags))
+               len = ddp->len;
+       i40e_fcoe_ddp_unmap(pf, ddp);
+out:
+       return len;
+}
+
+/**
+ * i40e_init_pf_fcoe - sets up the PF and HW for FCoE
+ * @pf: pointer to PF
+ **/
+void i40e_init_pf_fcoe(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
+       pf->num_fcoe_qps = 0;
+       pf->fcoe_hmc_cntx_num = 0;
+       pf->fcoe_hmc_filt_num = 0;
+
+       if (!pf->hw.func_caps.fcoe) {
+               dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n");
+               return;
+       }
+
+       if (!pf->hw.func_caps.dcb) {
+               dev_warn(&pf->pdev->dev,
+                        "Hardware is not DCB capable, not enabling FCoE.\n");
+               return;
+       }
+
+       /* enable FCoE hash filter */
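+       /* The hash-enable (HENA) mask is 64 bits wide, split across two
+        * 32-bit registers; the FCoE packet-classifier types fall in the
+        * upper half, hence HENA(1) and the "- 32" bit positions below.
+        */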
+       val = rd32(hw, I40E_PFQF_HENA(1));
+       val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
+       val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
+       val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
+       wr32(hw, I40E_PFQF_HENA(1), val);
+
+       /* enable flag */
+       pf->flags |= I40E_FLAG_FCOE_ENABLED;
+       pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
+
+       /* Reserve 4K DDP contexts and 20K filter size for FCoE */
+       pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
+                               I40E_DMA_CNTX_BASE_SIZE;
+       pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
+                               BIT(I40E_HASH_FILTER_SIZE_16K) *
+                               I40E_HASH_FILTER_BASE_SIZE;
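+       /* i.e. 4096 (4K) DDP contexts, plus a 16K-entry hash filter term
+        * on top of them for 20480 (20K) filters in total, matching the
+        * 4K/20K reservation noted above
+        */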
+
+       /* FCoE object: max 16K filter buckets and 4K DMA contexts */
+       pf->filter_settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_16K;
+       pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K;
+
+       /* Setup max frame with FCoE_MTU plus L2 overheads */
+       val = rd32(hw, I40E_GLFCOE_RCTL);
+       val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK;
+       val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+                << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT);
+       wr32(hw, I40E_GLFCOE_RCTL, val);
+
+       dev_info(&pf->pdev->dev, "FCoE is supported.\n");
+}
+
+#ifdef CONFIG_DCB
+/**
+ * i40e_get_fcoe_tc_map - Return TC map for FCoE APP
+ * @pf: pointer to PF
+ *
+ **/
+u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
+{
+       struct i40e_dcb_app_priority_table app;
+       struct i40e_hw *hw = &pf->hw;
+       u8 enabled_tc = 0;
+       u8 tc, i;
+       /* Get the FCoE APP TLV */
+       struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
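+       /* Translate the FCoE APP TLV's user priority through the ETS
+        * priority table; e.g. an FCoE APP at priority 3 that the table
+        * maps to TC1 yields enabled_tc = BIT(1) = 0x2.
+        */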
+       for (i = 0; i < dcbcfg->numapps; i++) {
+               app = dcbcfg->app[i];
+               if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+                   app.protocolid == ETH_P_FCOE) {
+                       tc = dcbcfg->etscfg.prioritytable[app.priority];
+                       enabled_tc |= BIT(tc);
+                       break;
+               }
+       }
+
+       /* TC0 if there is no TC defined for FCoE APP TLV */
+       enabled_tc = enabled_tc ? enabled_tc : 0x1;
+
+       return enabled_tc;
+}
+
+#endif /* CONFIG_DCB */
+/**
+ * i40e_fcoe_vsi_init - prepares the VSI context for creating a FCoE VSI
+ * @vsi: pointer to the associated VSI struct
+ * @ctxt: pointer to the associated VSI context to be passed to HW
+ *
+ * Returns 0 on success or < 0 on error
+ **/
+int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
+{
+       struct i40e_aqc_vsi_properties_data *info = &ctxt->info;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u8 enabled_tc = 0x1; /* Default as TC0 */
+
+       if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
+               dev_err(&pf->pdev->dev,
+                       "FCoE is not enabled for this device\n");
+               return -EPERM;
+       }
+
+       /* initialize the hardware for FCoE */
+       ctxt->pf_num = hw->pf_id;
+       ctxt->vf_num = 0;
+       ctxt->uplink_seid = vsi->uplink_seid;
+       ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+       ctxt->flags = I40E_AQ_VSI_TYPE_PF;
+
+       /* FCoE VSI would need the following sections */
+       info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+
+       /* FCoE VSI does not need these sections */
+       info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID |
+                                           I40E_AQ_VSI_PROP_VLAN_VALID |
+                                           I40E_AQ_VSI_PROP_CAS_PV_VALID |
+                                           I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
+                                           I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
+
+       if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+               info->valid_sections |=
+                               cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+               info->switch_id =
+                               cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+       }
+#ifdef CONFIG_DCB
+       enabled_tc = i40e_get_fcoe_tc_map(pf);
+#endif
+       i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
+
+       /* set up queue option section: only enable FCoE */
+       info->queueing_opt_flags = I40E_AQ_VSI_QUE_OPT_FCOE_ENA;
+
+       return 0;
+}
+
+/**
+ * i40e_fcoe_enable - implementation of ndo_fcoe_enable
+ * @netdev: pointer to the netdev that FCoE is created on
+ *
+ * Called with RTNL held when the upper FCoE protocol stack is ready
+ * to use the FCoE offload features.
+ *
+ * Returns 0 on success
+ **/
+int i40e_fcoe_enable(struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+
+       if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
+               netdev_err(netdev, "HW does not support FCoE.\n");
+               return -ENODEV;
+       }
+
+       if (vsi->type != I40E_VSI_FCOE) {
+               netdev_err(netdev, "interface does not support FCoE.\n");
+               return -EBUSY;
+       }
+
+       atomic_inc(&fcoe->refcnt);
+
+       return 0;
+}
+
+/**
+ * i40e_fcoe_disable - disables FCoE for the upper FCoE protocol stack
+ * @netdev: pointer to the netdev that FCoE is created on
+ *
+ * Returns 0 on success
+ **/
+int i40e_fcoe_disable(struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+
+       if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
+               netdev_err(netdev, "device does not support FCoE\n");
+               return -ENODEV;
+       }
+       if (vsi->type != I40E_VSI_FCOE)
+               return -EBUSY;
+
+       if (!atomic_dec_and_test(&fcoe->refcnt))
+               return -EINVAL;
+
+       netdev_info(netdev, "FCoE disabled\n");
+
+       return 0;
+}
+
+/**
+ * i40e_fcoe_dma_pool_free - free the per cpu pool for FCoE DDP
+ * @fcoe: the FCoE sw object
+ * @dev: the device that the pool is associated with
+ * @cpu: the cpu for this pool
+ *
+ **/
+static void i40e_fcoe_dma_pool_free(struct i40e_fcoe *fcoe,
+                                   struct device *dev,
+                                   unsigned int cpu)
+{
+       struct i40e_fcoe_ddp_pool *ddp_pool;
+
+       ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+       if (!ddp_pool->pool) {
+               dev_warn(dev, "DDP pool already freed for cpu %d\n", cpu);
+               return;
+       }
+       dma_pool_destroy(ddp_pool->pool);
+       ddp_pool->pool = NULL;
+}
+
+/**
+ * i40e_fcoe_dma_pool_create - per cpu pool for FCoE DDP
+ * @fcoe: the FCoE sw object
+ * @dev: the device that the pool is associated with
+ * @cpu: the cpu for this pool
+ *
+ * Returns 0 on success or nonzero on failure
+ *
+ **/
+static int i40e_fcoe_dma_pool_create(struct i40e_fcoe *fcoe,
+                                    struct device *dev,
+                                    unsigned int cpu)
+{
+       struct i40e_fcoe_ddp_pool *ddp_pool;
+       struct dma_pool *pool;
+       char pool_name[32];
+
+       ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+       if (ddp_pool->pool) {
+               dev_warn(dev, "DDP pool already allocated for cpu %d\n", cpu);
+               return 0;
+       }
+       snprintf(pool_name, sizeof(pool_name), "i40e_fcoe_ddp_%d", cpu);
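+       /* each pool entry holds one full user descriptor list of
+        * I40E_FCOE_DDP_PTR_MAX bytes, 16-byte aligned and never
+        * crossing a page boundary
+        */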
+       pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX,
+                              I40E_FCOE_DDP_PTR_ALIGN, PAGE_SIZE);
+       if (!pool) {
+               dev_err(dev, "dma_pool_create %s failed\n", pool_name);
+               return -ENOMEM;
+       }
+       ddp_pool->pool = pool;
+       return 0;
+}
+
+/**
+ * i40e_fcoe_free_ddp_resources - release FCoE DDP resources
+ * @vsi: the vsi FCoE is associated with
+ *
+ **/
+void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       int cpu, i;
+
+       /* do nothing if not FCoE VSI */
+       if (vsi->type != I40E_VSI_FCOE)
+               return;
+
+       /* do nothing if no DDP pools were allocated */
+       if (!fcoe->ddp_pool)
+               return;
+
+       for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
+               i40e_fcoe_ddp_put(vsi->netdev, i);
+
+       for_each_possible_cpu(cpu)
+               i40e_fcoe_dma_pool_free(fcoe, &pf->pdev->dev, cpu);
+
+       free_percpu(fcoe->ddp_pool);
+       fcoe->ddp_pool = NULL;
+
+       netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources released\n",
+                   vsi->id, vsi->seid);
+}
+
+/**
+ * i40e_fcoe_setup_ddp_resources - allocate per cpu DDP resources
+ * @vsi: the VSI FCoE is associated with
+ *
+ * Returns 0 on success or nonzero on failure
+ *
+ **/
+int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct device *dev = &pf->pdev->dev;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       unsigned int cpu;
+       int i;
+
+       if (vsi->type != I40E_VSI_FCOE)
+               return -ENODEV;
+
+       /* do nothing if no DDP pools were allocated */
+       if (fcoe->ddp_pool)
+               return -EEXIST;
+
+       /* allocate per CPU memory to track DDP pools */
+       fcoe->ddp_pool = alloc_percpu(struct i40e_fcoe_ddp_pool);
+       if (!fcoe->ddp_pool) {
+               dev_err(&pf->pdev->dev, "failed to allocate percpu DDP\n");
+               return -ENOMEM;
+       }
+
+       /* allocate pci pool for each cpu */
+       for_each_possible_cpu(cpu) {
+               if (!i40e_fcoe_dma_pool_create(fcoe, dev, cpu))
+                       continue;
+
+               dev_err(dev, "failed to alloc DDP pool on cpu:%d\n", cpu);
+               i40e_fcoe_free_ddp_resources(vsi);
+               return -ENOMEM;
+       }
+
+       /* initialize the sw context */
+       for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
+               i40e_fcoe_ddp_clear(&fcoe->ddp[i]);
+
+       netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources allocated\n",
+                   vsi->id, vsi->seid);
+
+       return 0;
+}
+
+/**
+ * i40e_fcoe_handle_status - check the Programming Status for FCoE
+ * @rx_ring: the Rx ring for this descriptor
+ * @rx_desc: the Rx descriptor for Programming Status, not a packet descriptor.
+ * @prog_id: the prog id from the programming status descriptor write-back
+ *
+ * Check if this is the Rx Programming Status descriptor write-back for FCoE.
+ * This is used to verify if the context/filter programming or invalidation
+ * requested by SW to the HW is successful or not and take actions accordingly.
+ **/
+void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
+                            union i40e_rx_desc *rx_desc, u8 prog_id)
+{
+       struct i40e_pf *pf = rx_ring->vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       struct i40e_fcoe_ddp *ddp;
+       u32 error;
+       u16 xid;
+       u64 qw;
+
+       /* we only care for FCoE here */
+       if (!i40e_fcoe_progid_is_fcoe(prog_id))
+               return;
+
+       xid = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param) &
+             (I40E_FCOE_DDP_MAX - 1);
+
+       if (!i40e_fcoe_xid_is_valid(xid))
+               return;
+
+       ddp = &fcoe->ddp[xid];
+       WARN_ON(xid != ddp->xid);
+
+       qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+               I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+
+       /* DDP context programming status: failure or success */
+       if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) {
+               if (I40E_RX_PROG_FCOE_ERROR_TBL_FULL(error)) {
+                       dev_err(&pf->pdev->dev, "xid %x ddp->xid %x TABLE FULL\n",
+                               xid, ddp->xid);
+                       ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT;
+               }
+               if (I40E_RX_PROG_FCOE_ERROR_CONFLICT(error)) {
+                       dev_err(&pf->pdev->dev, "xid %x ddp->xid %x CONFLICT\n",
+                               xid, ddp->xid);
+                       ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT;
+               }
+       }
+
+       /* DDP context invalidation status: failure or success */
+       if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS) {
+               if (I40E_RX_PROG_FCOE_ERROR_INVLFAIL(error)) {
+                       dev_err(&pf->pdev->dev, "xid %x ddp->xid %x INVALIDATION FAILURE\n",
+                               xid, ddp->xid);
+                       ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT;
+               }
+               /* clear the flag so we can retry invalidation */
+               clear_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags);
+       }
+
+       /* unmap DMA */
+       i40e_fcoe_ddp_unmap(pf, ddp);
+       i40e_fcoe_ddp_clear(ddp);
+}
+
+/**
+ * i40e_fcoe_handle_offload - check ddp status and mark it done
+ * @rx_ring: the Rx ring this descriptor came from
+ * @rx_desc: the Rx descriptor
+ * @skb: the skb holding the received data
+ *
+ * This checks ddp status.
+ *
+ * Returns: < 0 for an error or a non-FCoE ddp, 0 when the skb should
+ * not be passed to the ULD, > 0 for the length of data that has been
+ * DDPed.
+ **/
+int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
+                            union i40e_rx_desc *rx_desc,
+                            struct sk_buff *skb)
+{
+       struct i40e_pf *pf = rx_ring->vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       struct fc_frame_header *fh = NULL;
+       struct i40e_fcoe_ddp *ddp = NULL;
+       u32 status, fltstat;
+       u32 error, fcerr;
+       int rc = -EINVAL;
+       u16 ptype;
+       u16 xid;
+       u64 qw;
+
+       /* check this rxd is for programming status */
+       qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       /* packet descriptor, check packet type */
+       ptype = (qw & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+       if (!i40e_rx_is_fcoe(ptype))
+               goto out_no_ddp;
+
+       error = (qw & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT;
+       fcerr = (error >> I40E_RX_DESC_ERROR_L3L4E_SHIFT) &
+                I40E_RX_DESC_FCOE_ERROR_MASK;
+
+       /* check stateless offload error */
+       if (unlikely(fcerr == I40E_RX_DESC_ERROR_L3L4E_PROT)) {
+               dev_err(&pf->pdev->dev, "Protocol Error\n");
+               skb->ip_summed = CHECKSUM_NONE;
+       } else {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       }
+
+       /* check hw status on ddp */
+       status = (qw & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT;
+       fltstat = (status >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+                  I40E_RX_DESC_FLTSTAT_FCMASK;
+
+       /* now we are ready to check DDP */
+       fh = i40e_fcoe_fc_frame_header(skb);
+       xid = i40e_fcoe_fc_get_xid(fh);
+       if (!i40e_fcoe_xid_is_valid(xid))
+               goto out_no_ddp;
+
+       /* non DDP normal receive, return to the protocol stack */
+       if (fltstat == I40E_RX_DESC_FLTSTAT_NOMTCH) {
+               dev_info(&pf->pdev->dev, "No DDP, fcerr:0x%x\n", fcerr);
+               goto out_no_ddp;
+       }
+
+       /* do we have a sw ddp context setup ? */
+       ddp = &fcoe->ddp[xid];
+       if (!ddp->sgl)
+               goto out_no_ddp;
+
+       /* fetch xid from hw rxd wb, which should match up the sw ctxt */
+       xid = le16_to_cpu(rx_desc->wb.qword0.lo_dword.mirr_fcoe.fcoe_ctx_id);
+       if (ddp->xid != xid) {
+               dev_err(&pf->pdev->dev, "xid 0x%x does not match ctx_xid 0x%x\n",
+                       ddp->xid, xid);
+               goto out_put_ddp;
+       }
+
+       /* the same exchange has already errored out */
+       if (ddp->fcerr) {
+               dev_err(&pf->pdev->dev, "xid 0x%x fcerr 0x%x reported fcerr 0x%x\n",
+                       xid, ddp->fcerr, fcerr);
+               goto out_put_ddp;
+       }
+
+       /* fcoe param is valid by now with correct DDPed length */
+       ddp->len = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param);
+       ddp->fcerr = fcerr;
+       /* header posting only, useful only for target mode and debugging */
+       if (fltstat == I40E_RX_DESC_FLTSTAT_DDP) {
+               /* For target mode, we get header of the last packet but it
+                * does not have the FCoE trailer field, i.e., CRC and EOF
+                * Ordered Set since they are offloaded by the HW, so fill
+                * it up correspondingly to allow the packet to pass through
+                * to the upper protocol stack.
+                */
+               u32 f_ctl = ntoh24(fh->fh_f_ctl);
+
+               if ((f_ctl & FC_FC_END_SEQ) &&
+                   (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) {
+                       struct fcoe_crc_eof *crc = NULL;
+
+                       crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+                       crc->fcoe_eof = FC_EOF_T;
+               } else {
+                       /* otherwise, drop the header only frame */
+                       rc = 0;
+                       goto out_no_ddp;
+               }
+       }
+
+out_put_ddp:
+       /* either we got RSP or we have an error, unmap DMA in both cases */
+       i40e_fcoe_ddp_unmap(pf, ddp);
+       if (ddp->len && !ddp->fcerr) {
+               int pkts;
+
+               rc = ddp->len;
+               i40e_fcoe_ddp_clear(ddp);
+               ddp->len = rc;
+               pkts = DIV_ROUND_UP(rc, 2048);
+               rx_ring->stats.bytes += rc;
+               rx_ring->stats.packets += pkts;
+               rx_ring->q_vector->rx.total_bytes += rc;
+               rx_ring->q_vector->rx.total_packets += pkts;
+               set_bit(__I40E_FCOE_DDP_DONE, &ddp->flags);
+       }
+
+out_no_ddp:
+       return rc;
+}
+
+/**
+ * i40e_fcoe_ddp_setup - called to set up ddp context
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ * @target_mode: indicates this is a DDP request for target
+ *
+ * Returns : 1 for success and 0 for no DDP on this I/O
+ **/
+static int i40e_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+                              struct scatterlist *sgl, unsigned int sgc,
+                              int target_mode)
+{
+       static const unsigned int bufflen = I40E_FCOE_DDP_BUF_MIN;
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_fcoe_ddp_pool *ddp_pool;
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       unsigned int i, j, dmacount;
+       struct i40e_fcoe_ddp *ddp;
+       unsigned int firstoff = 0;
+       unsigned int thisoff = 0;
+       unsigned int thislen = 0;
+       struct scatterlist *sg;
+       dma_addr_t addr = 0;
+       unsigned int len;
+
+       if (xid >= I40E_FCOE_DDP_MAX) {
+               dev_warn(&pf->pdev->dev, "xid=0x%x out-of-range\n", xid);
+               return 0;
+       }
+
+       /* no DDP if we are already down or resetting */
+       if (test_bit(__I40E_DOWN, &pf->state) ||
+           test_bit(__I40E_NEEDS_RESTART, &pf->state)) {
+               dev_info(&pf->pdev->dev, "xid=0x%x device in reset/down\n",
+                        xid);
+               return 0;
+       }
+
+       ddp = &fcoe->ddp[xid];
+       if (ddp->sgl) {
+               dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
+                     xid, ddp->sgl, ddp->sgc);
+               return 0;
+       }
+       i40e_fcoe_ddp_clear(ddp);
+
+       if (!fcoe->ddp_pool) {
+               dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid);
+               return 0;
+       }
+
+       ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
+       if (!ddp_pool->pool) {
+               dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid);
+               goto out_noddp;
+       }
+
+       /* setup dma from scsi command sgl */
+       dmacount = dma_map_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+       if (dmacount == 0) {
+               dev_info(&pf->pdev->dev, "dma_map_sg for sgl %p, sgc %d failed\n",
+                        sgl, sgc);
+               goto out_noddp_unmap;
+       }
+
+       /* alloc the udl from our ddp pool */
+       ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
+       if (!ddp->udl) {
+               dev_info(&pf->pdev->dev,
+                        "Failed to allocate ddp context, xid 0x%x\n", xid);
+               goto out_noddp_unmap;
+       }
+
+       j = 0;
+       ddp->len = 0;
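+       /* Build the user descriptor list (UDL): one bufflen (4K) aligned
+        * DMA address per entry. As a hypothetical example, a single SG
+        * entry at DMA address 0x10000100 with length 8192 yields
+        * udl[0] = 0x10000000 (firstoff = 0x100), udl[1] = 0x10001000,
+        * udl[2] = 0x10002000, and lastsize = 256.
+        */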
+       for_each_sg(sgl, sg, dmacount, i) {
+               addr = sg_dma_address(sg);
+               len = sg_dma_len(sg);
+               ddp->len += len;
+               while (len) {
+                       /* max number of buffers allowed in one DDP context */
+                       if (j >= I40E_FCOE_DDP_BUFFCNT_MAX) {
+                               dev_info(&pf->pdev->dev,
+                                        "xid=%x:%d,%d,%d:addr=%llx not enough descriptors\n",
+                                        xid, i, j, dmacount, (u64)addr);
+                               goto out_noddp_free;
+                       }
+
+                       /* get the offset of length of current buffer */
+                       thisoff = addr & ((dma_addr_t)bufflen - 1);
+                       thislen = min_t(unsigned int, (bufflen - thisoff), len);
+                       /* all but the 1st buffer (j == 0)
+                        * must be aligned on bufflen
+                        */
+                       if ((j != 0) && (thisoff))
+                               goto out_noddp_free;
+
+                       /* all but the last buffer
+                        * ((i == (dmacount - 1)) && (thislen == len))
+                        * must end at bufflen
+                        */
+                       if (((i != (dmacount - 1)) || (thislen != len)) &&
+                             ((thislen + thisoff) != bufflen))
+                               goto out_noddp_free;
+
+                       ddp->udl[j] = (u64)(addr - thisoff);
+                       /* only the first buffer may have a non-zero offset */
+                       if (j == 0)
+                               firstoff = thisoff;
+                       len -= thislen;
+                       addr += thislen;
+                       j++;
+               }
+       }
+       /* only the last buffer may have non-full bufflen */
+       ddp->lastsize = thisoff + thislen;
+       ddp->firstoff = firstoff;
+       ddp->list_len = j;
+       ddp->pool = ddp_pool->pool;
+       ddp->sgl = sgl;
+       ddp->sgc = sgc;
+       ddp->xid = xid;
+       if (target_mode)
+               set_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
+       set_bit(__I40E_FCOE_DDP_INITALIZED, &ddp->flags);
+
+       put_cpu();
+       return 1; /* Success */
+
+out_noddp_free:
+       dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
+       i40e_fcoe_ddp_clear(ddp);
+
+out_noddp_unmap:
+       dma_unmap_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+out_noddp:
+       put_cpu();
+       return 0;
+}
+
+/**
+ * i40e_fcoe_ddp_get - called to set up ddp context in initiator mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ **/
+static int i40e_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+                            struct scatterlist *sgl, unsigned int sgc)
+{
+       return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
+}
+
+#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+/**
+ * i40e_fcoe_ddp_target - called to set up ddp context in target mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O. The DDP in target mode is a write I/O request
+ * from the initiator.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ **/
+static int i40e_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+                               struct scatterlist *sgl, unsigned int sgc)
+{
+       return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
+}
+
+#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */
+/**
+ * i40e_fcoe_program_ddp - programs the HW DDP related descriptors
+ * @tx_ring: transmit ring for this packet
+ * @skb:     the packet to be sent out
+ * @ddp:     the matching SW DDP context for this command
+ * @sof: the SOF to indicate class of service
+ *
+ * Programs the DDP context, queue context and filter context descriptors
+ * that offload this exchange. DDP is applicable only for READ as
+ * initiator or WRITE as responder (via checking XFER_RDY).
+ *
+ * Note: caller checks sof and ddp sw context
+ *
+ * Returns : none
+ **/
+static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring,
+                                 struct sk_buff *skb,
+                                 struct i40e_fcoe_ddp *ddp, u8 sof)
+{
+       struct i40e_fcoe_filter_context_desc *filter_desc = NULL;
+       struct i40e_fcoe_queue_context_desc *queue_desc = NULL;
+       struct i40e_fcoe_ddp_context_desc *ddp_desc = NULL;
+       struct i40e_pf *pf = tx_ring->vsi->back;
+       u16 i = tx_ring->next_to_use;
+       struct fc_frame_header *fh;
+       u64 flags_rsvd_lanq = 0;
+       bool target_mode;
+
+       /* check if abort is still pending */
+       if (test_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) {
+               dev_warn(&pf->pdev->dev,
+                        "DDP abort is still pending xid:%hx and ddp->flags:%lx:\n",
+                        ddp->xid, ddp->flags);
+               return;
+       }
+
+       /* set the flag to indicate this is programmed */
+       if (test_and_set_bit(__I40E_FCOE_DDP_PROGRAMMED, &ddp->flags)) {
+               dev_warn(&pf->pdev->dev,
+                        "DDP is already programmed for xid:%hx and ddp->flags:%lx:\n",
+                        ddp->xid, ddp->flags);
+               return;
+       }
+
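+       /* DDP programming consumes three consecutive descriptors on the
+        * Tx ring: a DDP context descriptor (buffer size and offsets), a
+        * queue context descriptor (UDL base and length) and a filter
+        * context descriptor (exchange matching), built in that order
+        * below.
+        */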
+       /* Prepare the DDP context descriptor */
+       ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i);
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       ddp_desc->type_cmd_foff_lsize =
+                               cpu_to_le64(I40E_TX_DESC_DTYPE_DDP_CTX  |
+                               ((u64)I40E_FCOE_DDP_CTX_DESC_BSIZE_4K  <<
+                               I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)        |
+                               ((u64)ddp->firstoff                    <<
+                               I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)       |
+                               ((u64)ddp->lastsize                    <<
+                               I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT));
+       ddp_desc->rsvd = cpu_to_le64(0);
+
+       /* target mode needs last packet in the sequence  */
+       target_mode = test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
+       if (target_mode)
+               ddp_desc->type_cmd_foff_lsize |=
+                       cpu_to_le64(I40E_FCOE_DDP_CTX_DESC_LASTSEQH);
+
+       /* Prepare queue_context descriptor */
+       queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++);
+       if (i == tx_ring->count)
+               i = 0;
+       queue_desc->dmaindx_fbase = cpu_to_le64(ddp->xid | ((u64)ddp->udp));
+       queue_desc->flen_tph = cpu_to_le64(ddp->list_len |
+                               ((u64)(I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC |
+                               I40E_FCOE_QUEUE_CTX_DESC_TPHDATA) <<
+                               I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT));
+
+       /* Prepare filter_context_desc */
+       filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i);
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       fh = (struct fc_frame_header *)skb_transport_header(skb);
+       filter_desc->param = cpu_to_le32(ntohl(fh->fh_parm_offset));
+       filter_desc->seqn = cpu_to_le16(ntohs(fh->fh_seq_cnt));
+       filter_desc->rsvd_dmaindx = cpu_to_le16(ddp->xid <<
+                               I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT);
+
+       flags_rsvd_lanq = I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP;
+       flags_rsvd_lanq |= (u64)(target_mode ?
+                       I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP :
+                       I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT);
+
+       flags_rsvd_lanq |= (u64)((sof == FC_SOF_I2 || sof == FC_SOF_N2) ?
+                       I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 :
+                       I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3);
+
+       flags_rsvd_lanq |= ((u64)skb->queue_mapping <<
+                               I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT);
+       filter_desc->flags_rsvd_lanq = cpu_to_le64(flags_rsvd_lanq);
+
+       /* By this time, all offload related descriptors have been programmed */
+       tx_ring->next_to_use = i;
+}
+
+/**
+ * i40e_fcoe_invalidate_ddp - invalidates DDP in case of abort
+ * @tx_ring: transmit ring for this packet
+ * @skb: the packet associated w/ this DDP invalidation, i.e., ABTS
+ * @ddp: the SW DDP context for this DDP
+ *
+ * Programs the Tx context descriptor to do DDP invalidation.
+ **/
+static void i40e_fcoe_invalidate_ddp(struct i40e_ring *tx_ring,
+                                    struct sk_buff *skb,
+                                    struct i40e_fcoe_ddp *ddp)
+{
+       struct i40e_tx_context_desc *context_desc;
+       int i;
+
+       if (test_and_set_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags))
+               return;
+
+       i = tx_ring->next_to_use;
+       context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       context_desc->tunneling_params = cpu_to_le32(0);
+       context_desc->l2tag2 = cpu_to_le16(0);
+       context_desc->rsvd = cpu_to_le16(0);
+       context_desc->type_cmd_tso_mss = cpu_to_le64(
+               I40E_TX_DESC_DTYPE_FCOE_CTX |
+               (I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL <<
+               I40E_TXD_CTX_QW1_CMD_SHIFT) |
+               (I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND <<
+               I40E_TXD_CTX_QW1_CMD_SHIFT));
+       tx_ring->next_to_use = i;
+}
+
+/**
+ * i40e_fcoe_handle_ddp - check we should setup or invalidate DDP
+ * @tx_ring: transmit ring for this packet
+ * @skb: the packet to be sent out
+ * @sof: the SOF to indicate class of service
+ *
+ * Determines if this is an ABTS/READ/XFER_RDY and whether there is
+ * a matching SW DDP context for this command. DDP is applicable
+ * only for READ as initiator or WRITE as responder (via checking
+ * XFER_RDY). If this is an ABTS, just invalidate the context.
+ **/
+static void i40e_fcoe_handle_ddp(struct i40e_ring *tx_ring,
+                                struct sk_buff *skb, u8 sof)
+{
+       struct i40e_pf *pf = tx_ring->vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       struct fc_frame_header *fh;
+       struct i40e_fcoe_ddp *ddp;
+       u32 f_ctl;
+       u8 r_ctl;
+       u16 xid;
+
+       fh = (struct fc_frame_header *)skb_transport_header(skb);
+       f_ctl = ntoh24(fh->fh_f_ctl);
+       r_ctl = fh->fh_r_ctl;
+       ddp = NULL;
+
+       if ((r_ctl == FC_RCTL_DD_DATA_DESC) && (f_ctl & FC_FC_EX_CTX)) {
+               /* exchange responder? if so, XFER_RDY for write */
+               xid = ntohs(fh->fh_rx_id);
+               if (i40e_fcoe_xid_is_valid(xid)) {
+                       ddp = &fcoe->ddp[xid];
+                       if ((ddp->xid == xid) &&
+                           (test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
+                               i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
+               }
+       } else if (r_ctl == FC_RCTL_DD_UNSOL_CMD) {
+               /* exchange originator, check READ cmd */
+               xid = ntohs(fh->fh_ox_id);
+               if (i40e_fcoe_xid_is_valid(xid)) {
+                       ddp = &fcoe->ddp[xid];
+                       if ((ddp->xid == xid) &&
+                           (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
+                               i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
+               }
+       } else if (r_ctl == FC_RCTL_BA_ABTS) {
+               /* exchange originator, check ABTS */
+               xid = ntohs(fh->fh_ox_id);
+               if (i40e_fcoe_xid_is_valid(xid)) {
+                       ddp = &fcoe->ddp[xid];
+                       if ((ddp->xid == xid) &&
+                           (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
+                               i40e_fcoe_invalidate_ddp(tx_ring, skb, ddp);
+               }
+       }
+}
+
+/**
+ * i40e_fcoe_tso - set up FCoE TSO
+ * @tx_ring:  ring to send buffer on
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ * @hdr_len:  the tso header length
+ * @sof: the SOF to indicate class of service
+ *
+ * Note: sof must already have been checked to be either class 2 or
+ * class 3 before calling this function.
+ *
+ * Returns 1 to indicate sequence segmentation offload is properly set up,
+ * 0 to indicate no TSO is needed, or otherwise an error code meaning the
+ * frame should be dropped.
+ **/
+static int i40e_fcoe_tso(struct i40e_ring *tx_ring,
+                        struct sk_buff *skb,
+                        u32 tx_flags, u8 *hdr_len, u8 sof)
+{
+       struct i40e_tx_context_desc *context_desc;
+       u32 cd_type, cd_cmd, cd_tso_len, cd_mss;
+       struct fc_frame_header *fh;
+       u64 cd_type_cmd_tso_mss;
+
+       /* must match gso type as FCoE */
+       if (!skb_is_gso(skb))
+               return 0;
+
+       /* is it the expected GSO type for FCoE? */
+       if (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) {
+               netdev_err(skb->dev,
+                          "wrong gso type %d:expecting SKB_GSO_FCOE\n",
+                          skb_shinfo(skb)->gso_type);
+               return -EINVAL;
+       }
+
+       /* header and trailer are inserted by hw */
+       *hdr_len = skb_transport_offset(skb) + sizeof(struct fc_frame_header) +
+                  sizeof(struct fcoe_crc_eof);
+
+       /* check sof to decide a class 2 or 3 TSO */
+       if (likely(i40e_fcoe_sof_is_class3(sof)))
+               cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3;
+       else
+               cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2;
+
+       /* param field valid? */
+       fh = (struct fc_frame_header *)skb_transport_header(skb);
+       if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
+               cd_cmd |= I40E_FCOE_TX_CTX_DESC_RELOFF;
+
+       /* fill the field values */
+       cd_type = I40E_TX_DESC_DTYPE_FCOE_CTX;
+       cd_tso_len = skb->len - *hdr_len;
+       cd_mss = skb_shinfo(skb)->gso_size;
+       cd_type_cmd_tso_mss =
+               ((u64)cd_type  << I40E_TXD_CTX_QW1_DTYPE_SHIFT)     |
+               ((u64)cd_cmd     << I40E_TXD_CTX_QW1_CMD_SHIFT)     |
+               ((u64)cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+               ((u64)cd_mss     << I40E_TXD_CTX_QW1_MSS_SHIFT);
+
+       /* grab the next descriptor */
+       context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
+       tx_ring->next_to_use++;
+       if (tx_ring->next_to_use == tx_ring->count)
+               tx_ring->next_to_use = 0;
+
+       context_desc->tunneling_params = 0;
+       context_desc->l2tag2 = cpu_to_le16((tx_flags & I40E_TX_FLAGS_VLAN_MASK)
+                                           >> I40E_TX_FLAGS_VLAN_SHIFT);
+       context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+
+       return 1;
+}
+
+/**
+ * i40e_fcoe_tx_map - build the tx descriptor
+ * @tx_ring:  ring to send buffer on
+ * @skb:      send buffer
+ * @first:    first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len:  ptr to the size of the packet header
+ * @eof:      the frame eof value
+ *
+ * Note, for FCoE, sof and eof are already checked
+ **/
+static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
+                            struct sk_buff *skb,
+                            struct i40e_tx_buffer *first,
+                            u32 tx_flags, u8 hdr_len, u8 eof)
+{
+       u32 td_offset = 0;
+       u32 td_cmd = 0;
+       u32 maclen;
+
+       /* insert CRC */
+       td_cmd = I40E_TX_DESC_CMD_ICRC;
+
+       /* setup MACLEN */
+       maclen = skb_network_offset(skb);
+       if (tx_flags & I40E_TX_FLAGS_SW_VLAN)
+               maclen += sizeof(struct vlan_hdr);
+
+       if (skb->protocol == htons(ETH_P_FCOE)) {
+               /* for FCoE, maclen should exclude ether type */
+               maclen -= 2;
+               /* setup type as FCoE and EOF insertion */
+               td_cmd |= (I40E_TX_DESC_CMD_FCOET | i40e_fcoe_ctxt_eof(eof));
+               /* setup FCoELEN and FCLEN */
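+               /* assuming the standard 14-byte struct fcoe_hdr and
+                * 24-byte struct fc_frame_header, IPLEN carries
+                * (14 + 2) >> 2 = 4 words (the +2 restores the ether
+                * type bytes excluded from maclen above) and the FC
+                * length field carries 24 >> 2 = 6 words
+                */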
+               td_offset |= ((((sizeof(struct fcoe_hdr) + 2) >> 2) <<
+                               I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
+                             ((sizeof(struct fc_frame_header) >> 2) <<
+                               I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT));
+               /* trim to exclude trailer */
+               pskb_trim(skb, skb->len - sizeof(struct fcoe_crc_eof));
+       }
+
+       /* MACLEN is ether header length in words not bytes */
+       td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+       i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset);
+}
+
+/**
+ * i40e_fcoe_set_skb_header - adjust skb header pointers for FIP/FCoE/FC
+ * @skb: the skb to be adjusted
+ *
+ * Returns 0 if this skb carries FCoE/FIP, possibly VLAN tagged, after
+ * adjusting the skb header pointers accordingly; otherwise returns -EINVAL.
+ **/
+static inline int i40e_fcoe_set_skb_header(struct sk_buff *skb)
+{
+       __be16 protocol = skb->protocol;
+
+       skb_reset_mac_header(skb);
+       skb->mac_len = sizeof(struct ethhdr);
+       if (protocol == htons(ETH_P_8021Q)) {
+               struct vlan_ethhdr *veth = (struct vlan_ethhdr *) eth_hdr(skb);
+
+               protocol = veth->h_vlan_encapsulated_proto;
+               skb->mac_len += sizeof(struct vlan_hdr);
+       }
+
+       /* FCoE or FIP only */
+       if ((protocol != htons(ETH_P_FIP)) &&
+           (protocol != htons(ETH_P_FCOE)))
+               return -EINVAL;
+
+       /* set header to L2 of FCoE/FIP */
+       skb_set_network_header(skb, skb->mac_len);
+       if (protocol == htons(ETH_P_FIP))
+               return 0;
+
+       /* set header to L3 of FC */
+       skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
+       return 0;
+}
+
+/**
+ * i40e_fcoe_xmit_frame - transmit buffer
+ * @skb:     send buffer
+ * @netdev:  the fcoe netdev
+ *
+ * Returns 0 if sent, else an error code
+ **/
+static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
+                                       struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(skb->dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
+       struct i40e_tx_buffer *first;
+       u32 tx_flags = 0;
+       u8 hdr_len = 0;
+       u8 sof = 0;
+       u8 eof = 0;
+       int fso;
+
+       if (i40e_fcoe_set_skb_header(skb))
+               goto out_drop;
+
+       if (!i40e_xmit_descriptor_count(skb, tx_ring))
+               return NETDEV_TX_BUSY;
+
+       /* prepare the xmit flags */
+       if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+               goto out_drop;
+
+       /* record the location of the first descriptor for this packet */
+       first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+       /* FIP is regular L2 traffic without offload */
+       if (skb->protocol == htons(ETH_P_FIP))
+               goto out_send;
+
+       /* check sof and eof, only supports FC Class 2 or 3 */
+       if (i40e_fcoe_fc_sof(skb, &sof) || i40e_fcoe_fc_eof(skb, &eof)) {
+               netdev_err(netdev, "SOF/EOF error:%02x - %02x\n", sof, eof);
+               goto out_drop;
+       }
+
+       /* always do FCCRC for FCoE */
+       tx_flags |= I40E_TX_FLAGS_FCCRC;
+
+       /* check we should do sequence offload */
+       fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof);
+       if (fso < 0)
+               goto out_drop;
+       else if (fso)
+               tx_flags |= I40E_TX_FLAGS_FSO;
+       else
+               i40e_fcoe_handle_ddp(tx_ring, skb, sof);
+
+out_send:
+       /* send out the packet */
+       i40e_fcoe_tx_map(tx_ring, skb, first, tx_flags, hdr_len, eof);
+
+       i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+       return NETDEV_TX_OK;
+
+out_drop:
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+/**
+ * i40e_fcoe_change_mtu - NDO callback to change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns -EPERM as the operation is not permitted
+ *
+ **/
+static int i40e_fcoe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       netdev_warn(netdev, "MTU change is not supported on FCoE interfaces\n");
+       return -EPERM;
+}
+
+#ifdef HAVE_NDO_SET_FEATURES
+/**
+ * i40e_fcoe_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ *
+ **/
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+static int i40e_fcoe_set_features(struct net_device *netdev, u32 features)
+#else
+static int i40e_fcoe_set_features(struct net_device *netdev,
+                            netdev_features_t features)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+#else
+       if (features & NETIF_F_HW_VLAN_RX)
+#endif
+               i40e_vlan_stripping_enable(vsi);
+       else
+               i40e_vlan_stripping_disable(vsi);
+
+       return 0;
+}
+
+#endif /* HAVE_NDO_SET_FEATURES */
+
+#ifdef HAVE_NET_DEVICE_OPS
+static const struct net_device_ops i40e_fcoe_netdev_ops = {
+       .ndo_open               = i40e_open,
+       .ndo_stop               = i40e_close,
+#ifdef HAVE_NDO_GET_STATS64
+       .ndo_get_stats64        = i40e_get_netdev_stats_struct,
+#else
+       .ndo_get_stats          = i40e_get_netdev_stats_struct,
+#endif
+       .ndo_set_rx_mode        = i40e_set_rx_mode,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = i40e_set_mac,
+       .ndo_change_mtu         = i40e_fcoe_change_mtu,
+#if defined(HAVE_PTP_1588_CLOCK)
+       .ndo_do_ioctl           = i40e_ioctl,
+#endif
+       .ndo_tx_timeout         = i40e_tx_timeout,
+       .ndo_vlan_rx_add_vid    = i40e_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = i40e_vlan_rx_kill_vid,
+#ifdef HAVE_SETUP_TC
+       .ndo_setup_tc           = i40e_setup_tc,
+#endif /* HAVE_SETUP_TC */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = i40e_netpoll,
+#endif
+       .ndo_start_xmit         = i40e_fcoe_xmit_frame,
+       .ndo_fcoe_enable        = i40e_fcoe_enable,
+       .ndo_fcoe_disable       = i40e_fcoe_disable,
+       .ndo_fcoe_ddp_setup     = i40e_fcoe_ddp_get,
+       .ndo_fcoe_ddp_done      = i40e_fcoe_ddp_put,
+#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+       .ndo_fcoe_ddp_target    = i40e_fcoe_ddp_target,
+#endif
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+};
+
+/* RHEL6 keeps these operations in a separate structure */
+static const struct net_device_ops_ext i40e_fcoe_netdev_ops_ext = {
+       .size = sizeof(struct net_device_ops_ext),
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#ifdef HAVE_NDO_SET_FEATURES
+       .ndo_set_features       = i40e_fcoe_set_features,
+#endif /* HAVE_NDO_SET_FEATURES */
+};
+#endif /* HAVE_NET_DEVICE_OPS */
+
+/* fcoe network device type */
+static struct device_type fcoe_netdev_type = {
+       .name = "fcoe",
+};
+
+/**
+ * i40e_fcoe_config_netdev - prepares the netdev for the FCoE VSI
+ * @netdev: the associated net_device to be configured
+ * @vsi: pointer to the associated VSI struct
+ **/
+void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
+{
+       struct i40e_hw *hw = &vsi->back->hw;
+       struct i40e_pf *pf = vsi->back;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+       u32 hw_features;
+#endif
+
+       if (vsi->type != I40E_VSI_FCOE)
+               return;
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+       netdev->features = (NETIF_F_HW_VLAN_CTAG_TX |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_FILTER);
+
+       netdev->vlan_features = netdev->features;
+       netdev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+                                  NETIF_F_HW_VLAN_CTAG_RX |
+                                  NETIF_F_HW_VLAN_CTAG_FILTER);
+#else
+       netdev->features = (NETIF_F_HW_VLAN_TX |
+                           NETIF_F_HW_VLAN_RX |
+                           NETIF_F_HW_VLAN_FILTER);
+
+       netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_TX |
+                                                    NETIF_F_HW_VLAN_RX |
+                                                    NETIF_F_HW_VLAN_FILTER);
+#endif
+       netdev->fcoe_ddp_xid = I40E_FCOE_DDP_MAX - 1;
+       netdev->features |= NETIF_F_ALL_FCOE;
+       netdev->vlan_features |= NETIF_F_ALL_FCOE;
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+       hw_features = get_netdev_hw_features(netdev);
+       hw_features |= netdev->features;
+       set_netdev_hw_features(netdev, hw_features);
+#else
+       netdev->hw_features |= netdev->features;
+#endif
+#endif
+#ifdef IFF_UNICAST_FLT
+       netdev->priv_flags |= IFF_UNICAST_FLT;
+#endif
+#ifdef IFF_SUPP_NOFCS
+       netdev->priv_flags |= IFF_SUPP_NOFCS;
+#endif
+
+       strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
+       netdev->mtu = FCOE_MTU;
+       SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+       SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type);
+#ifdef HAVE_NETDEV_PORT
+       /* Set dev_port to 1 for the FCoE netdev, versus the default of
+        * zero for the PF netdev; this helps the biosdevname user tool
+        * tell them apart when both are attached to the same PCI
+        * function.
+        */
+       netdev->dev_port = 1;
+#endif
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+       i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
+       i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
+       i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
+       i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       /* use san mac */
+       ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
+#ifdef ETHTOOL_GPERMADDR
+       ether_addr_copy(netdev->perm_addr, hw->mac.san_addr);
+#endif
+#ifdef HAVE_NET_DEVICE_OPS
+       /* fcoe netdev ops */
+       netdev->netdev_ops = &i40e_fcoe_netdev_ops;
+#endif
+}
+
+/**
+ * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI
+ * @pf: the PF that VSI is associated with
+ *
+ **/
+void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi;
+       u16 seid;
+       int i;
+
+       if (!(pf->flags & I40E_FLAG_FCOE_ENABLED))
+               return;
+
+       BUG_ON(!pf->vsi[pf->lan_vsi]);
+
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               vsi = pf->vsi[i];
+               if (vsi && vsi->type == I40E_VSI_FCOE) {
+                       dev_warn(&pf->pdev->dev,
+                                "FCoE VSI already created\n");
+                       return;
+               }
+       }
+
+       seid = pf->vsi[pf->lan_vsi]->seid;
+       vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0);
+       if (vsi) {
+               dev_dbg(&pf->pdev->dev,
+                       "Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n",
+                       vsi->seid, vsi->id, vsi->uplink_seid, seid);
+       } else {
+               dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
+       }
+}
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_fcoe.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_fcoe.h
new file mode 100644 (file)
index 0000000..88a2bac
--- /dev/null
@@ -0,0 +1,127 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_FCOE_H_
+#define _I40E_FCOE_H_
+
+/* FCoE HW context helper macros */
+#define I40E_DDP_CONTEXT_DESC(R, i)     \
+       (&(((struct i40e_fcoe_ddp_context_desc *)((R)->desc))[i]))
+
+#define I40E_QUEUE_CONTEXT_DESC(R, i)   \
+       (&(((struct i40e_fcoe_queue_context_desc *)((R)->desc))[i]))
+
+#define I40E_FILTER_CONTEXT_DESC(R, i)  \
+       (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i]))
+
+/* receive queue descriptor filter status for FCoE */
+#define I40E_RX_DESC_FLTSTAT_FCMASK    0x3
+#define I40E_RX_DESC_FLTSTAT_NOMTCH    0x0     /* no ddp context match */
+#define I40E_RX_DESC_FLTSTAT_NODDP     0x1     /* no ddp due to error */
+#define I40E_RX_DESC_FLTSTAT_DDP       0x2     /* DDPed payload, post header */
+#define I40E_RX_DESC_FLTSTAT_FCPRSP    0x3     /* FCP_RSP */
+
+/* receive queue descriptor error codes for FCoE */
+#define I40E_RX_DESC_FCOE_ERROR_MASK           \
+       (I40E_RX_DESC_ERROR_L3L4E_PROT |        \
+        I40E_RX_DESC_ERROR_L3L4E_FC |          \
+        I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR |    \
+        I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN)
+
+/* receive queue descriptor programming error */
+#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL(e)    \
+       (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) & 0x1)
+
+#define I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)    \
+       (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
+
+#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT   \
+       BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT   \
+       BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+
+#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e)    \
+       I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
+#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT   \
+       I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT
+
+/* FCoE DDP related definitions */
+#define I40E_FCOE_MIN_XID      0x0000  /* the min xid supported by fcoe_sw */
+#define I40E_FCOE_MAX_XID      0x0FFF  /* the max xid supported by fcoe_sw */
+#define I40E_FCOE_DDP_BUFFCNT_MAX      512     /* 9 bits bufcnt */
+#define I40E_FCOE_DDP_PTR_ALIGN                16
+#define I40E_FCOE_DDP_PTR_MAX  (I40E_FCOE_DDP_BUFFCNT_MAX * sizeof(dma_addr_t))
+#define I40E_FCOE_DDP_BUF_MIN  4096
+#define I40E_FCOE_DDP_MAX      2048
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT  8
+
+/* supported netdev features for FCoE */
+#define I40E_FCOE_NETIF_FEATURES (NETIF_F_ALL_FCOE | \
+       NETIF_F_HW_VLAN_CTAG_TX | \
+       NETIF_F_HW_VLAN_CTAG_RX | \
+       NETIF_F_HW_VLAN_CTAG_FILTER)
+
+/* DDP context flags */
+enum i40e_fcoe_ddp_flags {
+       __I40E_FCOE_DDP_NONE = 1,
+       __I40E_FCOE_DDP_TARGET,
+       __I40E_FCOE_DDP_INITALIZED,
+       __I40E_FCOE_DDP_PROGRAMMED,
+       __I40E_FCOE_DDP_DONE,
+       __I40E_FCOE_DDP_ABORTED,
+       __I40E_FCOE_DDP_UNMAPPED,
+};
+
+/* DDP SW context struct */
+struct i40e_fcoe_ddp {
+       int len;
+       u16 xid;
+       u16 firstoff;
+       u16 lastsize;
+       u16 list_len;
+       u8 fcerr;
+       u8 prerr;
+       unsigned long flags;
+       unsigned int sgc;
+       struct scatterlist *sgl;
+       dma_addr_t udp;
+       u64 *udl;
+       struct dma_pool *pool;
+
+};
+
+struct i40e_fcoe_ddp_pool {
+       struct dma_pool *pool;
+};
+
+struct i40e_fcoe {
+       unsigned long mode;
+       atomic_t refcnt;
+       struct i40e_fcoe_ddp_pool __percpu *ddp_pool;
+       struct i40e_fcoe_ddp ddp[I40E_FCOE_DDP_MAX];
+};
+
+#endif /* _I40E_FCOE_H_ */
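
The FLTSTAT codes and the programming-error helpers above are plain one- and two-bit field extractions. A minimal userspace sketch of the same decoding; the shift value here is a placeholder, since the real I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT is defined elsewhere in the driver:

    #include <stdio.h>
    #include <stdint.h>

    #define FLTSTAT_FCMASK  0x3   /* two-bit filter status field, as above */
    #define TBL_FULL_SHIFT  2     /* placeholder for the driver's shift value */

    int main(void)
    {
            uint64_t err = 0x6;   /* example error/status bits */

            /* low two bits: 0x2 means "DDPed payload, post header" */
            printf("fltstat=%#x\n", (unsigned int)(err & FLTSTAT_FCMASK));
            /* single-bit extraction, mirroring I40E_RX_PROG_FCOE_ERROR_TBL_FULL(e) */
            printf("tbl_full=%u\n", (unsigned int)((err >> TBL_FULL_SHIFT) & 0x1));
            return 0;
    }
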
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_helper.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_helper.h
new file mode 100644 (file)
index 0000000..003291a
--- /dev/null
@@ -0,0 +1,138 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HELPER_H_
+#define _I40E_HELPER_H_
+
+/**
+ * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+inline int i40e_allocate_dma_mem_d(struct i40e_hw *hw,
+                                  struct i40e_dma_mem *mem,
+                                  u64 size, u32 alignment)
+{
+       struct i40e_pf *nf = (struct i40e_pf *)hw->back;
+
+       mem->size = ALIGN(size, alignment);
+       mem->va = dma_zalloc_coherent(&nf->pdev->dev, mem->size,
+                                     &mem->pa, GFP_KERNEL);
+       if (!mem->va)
+               return -ENOMEM;
+
+       return 0;
+}
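
Note that mem->size is rounded up with ALIGN() before the coherent allocation, so the buffer length is always a whole multiple of the requested alignment. For power-of-two alignments the rounding reduces to a mask operation, sketched here standalone:

    #include <stdio.h>
    #include <stdint.h>

    /* round size up to a power-of-two alignment, like the kernel's ALIGN() */
    static uint64_t align_up(uint64_t size, uint64_t a)
    {
            return (size + a - 1) & ~(a - 1);
    }

    int main(void)
    {
            /* a 6000-byte request with 4096-byte alignment becomes 8192 */
            printf("%llu\n", (unsigned long long)align_up(6000, 4096));
            return 0;
    }
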
+
+/**
+ * i40e_free_dma_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ **/
+inline int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+       struct i40e_pf *nf = (struct i40e_pf *)hw->back;
+
+       dma_free_coherent(&nf->pdev->dev, mem->size, mem->va, mem->pa);
+       mem->va = NULL;
+       mem->pa = 0;
+       mem->size = 0;
+
+       return 0;
+}
+
+/**
+ * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+inline int i40e_allocate_virt_mem_d(struct i40e_hw *hw,
+                                   struct i40e_virt_mem *mem,
+                                   u32 size)
+{
+       mem->size = size;
+       mem->va = kzalloc(size, GFP_KERNEL);
+
+       if (!mem->va)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/**
+ * i40e_free_virt_mem_d - OS specific memory free for shared code
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ **/
+inline int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+{
+       /* it's ok to kfree a NULL pointer */
+       kfree(mem->va);
+       mem->va = NULL;
+       mem->size = 0;
+
+       return 0;
+}
+
+/**
+ * i40e_init_spinlock_d - OS specific spinlock init for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ **/
+inline void i40e_init_spinlock_d(struct i40e_spinlock *sp)
+{
+       mutex_init((struct mutex *)sp);
+}
+
+/**
+ * i40e_acquire_spinlock_d - OS specific spinlock acquire for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ **/
+inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
+{
+       mutex_lock((struct mutex *)sp);
+}
+
+/**
+ * i40e_release_spinlock_d - OS specific spinlock release for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ **/
+inline void i40e_release_spinlock_d(struct i40e_spinlock *sp)
+{
+       mutex_unlock((struct mutex *)sp);
+}
+
+/**
+ * i40e_destroy_spinlock_d - OS specific spinlock destroy for shared code
+ * @sp: pointer to a spinlock declared in driver space
+ **/
+inline void i40e_destroy_spinlock_d(struct i40e_spinlock *sp)
+{
+       mutex_destroy((struct mutex *)sp);
+}
+#endif /* _I40E_HELPER_H_ */
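
Despite the name, the shared-code "spinlock" is backed by a mutex here: the admin-queue paths that take it can sleep, which a true spinlock would not allow. The casts assume struct i40e_spinlock (declared in the driver's i40e_osdep.h) wraps a struct mutex as its first member. A userspace analogue of the same wrapper pattern, using pthreads:

    #include <pthread.h>

    /* stand-in for struct i40e_spinlock wrapping a struct mutex */
    struct os_lock {
            pthread_mutex_t m;
    };

    static void os_lock_init(struct os_lock *sp)    { pthread_mutex_init(&sp->m, NULL); }
    static void os_lock_acquire(struct os_lock *sp) { pthread_mutex_lock(&sp->m); }
    static void os_lock_release(struct os_lock *sp) { pthread_mutex_unlock(&sp->m); }
    static void os_lock_destroy(struct os_lock *sp) { pthread_mutex_destroy(&sp->m); }
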
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_hmc.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_hmc.c
new file mode 100644 (file)
index 0000000..948949c
--- /dev/null
@@ -0,0 +1,365 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#ifndef I40E_NO_TYPE_HEADER
+#include "i40e_type.h"
+#endif
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+                                             struct i40e_hmc_info *hmc_info,
+                                             u32 sd_index,
+                                             enum i40e_sd_entry_type type,
+                                             u64 direct_mode_sz)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       struct i40e_hmc_sd_entry *sd_entry;
+       enum   i40e_memory_type mem_type;
+       bool dma_mem_alloc_done = false;
+       struct i40e_dma_mem mem;
+       u64 alloc_len;
+
+       if (NULL == hmc_info->sd_table.sd_entry) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
+               goto exit;
+       }
+
+       if (sd_index >= hmc_info->sd_table.sd_cnt) {
+               ret_code = I40E_ERR_INVALID_SD_INDEX;
+               hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
+               goto exit;
+       }
+
+       sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+       if (!sd_entry->valid) {
+               if (I40E_SD_TYPE_PAGED == type) {
+                       mem_type = i40e_mem_pd;
+                       alloc_len = I40E_HMC_PAGED_BP_SIZE;
+               } else {
+                       mem_type = i40e_mem_bp_jumbo;
+                       alloc_len = direct_mode_sz;
+               }
+
+               /* allocate a 4K pd page or 2M backing page */
+               ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+                                                I40E_HMC_PD_BP_BUF_ALIGNMENT);
+               if (ret_code)
+                       goto exit;
+               dma_mem_alloc_done = true;
+               if (I40E_SD_TYPE_PAGED == type) {
+                       ret_code = i40e_allocate_virt_mem(hw,
+                                       &sd_entry->u.pd_table.pd_entry_virt_mem,
+                                       sizeof(struct i40e_hmc_pd_entry) * 512);
+                       if (ret_code)
+                               goto exit;
+                       sd_entry->u.pd_table.pd_entry =
+                               (struct i40e_hmc_pd_entry *)
+                               sd_entry->u.pd_table.pd_entry_virt_mem.va;
+                       i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
+                                   &mem, sizeof(struct i40e_dma_mem),
+                                   I40E_NONDMA_TO_NONDMA);
+               } else {
+                       i40e_memcpy(&sd_entry->u.bp.addr,
+                                   &mem, sizeof(struct i40e_dma_mem),
+                                   I40E_NONDMA_TO_NONDMA);
+                       sd_entry->u.bp.sd_pd_index = sd_index;
+               }
+               /* initialize the sd entry */
+               hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+               /* increment the ref count */
+               I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+       }
+       /* Increment backing page reference count */
+       if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+               I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+       if (I40E_SUCCESS != ret_code)
+               if (dma_mem_alloc_done)
+                       i40e_free_dma_mem(hw, &mem);
+
+       return ret_code;
+}
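
The two branches above follow the HMC page geometry: a paged SD is backed by one 4K PD page whose 512 eight-byte entries each point at a 4K backing page, while a direct SD is a single contiguous buffer (2M by default, or direct_mode_sz). A quick standalone check of the 512 figure used for the pd_entry allocation:

    #include <stdio.h>

    int main(void)
    {
            long pd_page = 4096;               /* I40E_HMC_PAGED_BP_SIZE */
            long pd_entry_sz = 8;              /* one u64 page descriptor */
            long direct_bp = 2L * 1024 * 1024; /* I40E_HMC_DIRECT_BP_SIZE */

            printf("PDs per PD page: %ld\n", pd_page / pd_entry_sz);  /* 512 */
            printf("4K pages per 2M SD: %ld\n", direct_bp / pd_page); /* 512 */
            return 0;
    }
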
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use the preallocated page instead of allocating a new one.
+ *
+ * This function:
+ *     1. Initializes the pd entry
+ *     2. Adds the pd_entry to the pd_table
+ *     3. Marks the entry valid in the i40e_hmc_pd_entry structure
+ *     4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ *     1. The memory for the pd should be pinned down, physically contiguous,
+ *        aligned on a 4K boundary and zeroed.
+ *     2. It should be 4K in size.
+ **/
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+                                             struct i40e_hmc_info *hmc_info,
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       struct i40e_hmc_pd_table *pd_table;
+       struct i40e_hmc_pd_entry *pd_entry;
+       struct i40e_dma_mem mem;
+       struct i40e_dma_mem *page = &mem;
+       u32 sd_idx, rel_pd_idx;
+       u64 *pd_addr;
+       u64 page_desc;
+
+       if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+               ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+               hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
+               goto exit;
+       }
+
+       /* find corresponding sd */
+       sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+       if (I40E_SD_TYPE_PAGED !=
+           hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+               goto exit;
+
+       rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+       pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+       pd_entry = &pd_table->pd_entry[rel_pd_idx];
+       if (!pd_entry->valid) {
+               if (rsrc_pg) {
+                       pd_entry->rsrc_pg = true;
+                       page = rsrc_pg;
+               } else {
+                       /* allocate a 4K backing page */
+                       ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+                                               I40E_HMC_PAGED_BP_SIZE,
+                                               I40E_HMC_PD_BP_BUF_ALIGNMENT);
+                       if (ret_code)
+                               goto exit;
+                       pd_entry->rsrc_pg = false;
+               }
+
+               i40e_memcpy(&pd_entry->bp.addr, page,
+                           sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
+               pd_entry->bp.sd_pd_index = pd_index;
+               pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+               /* Set page address and valid bit */
+               page_desc = page->pa | 0x1;
+
+               pd_addr = (u64 *)pd_table->pd_page_addr.va;
+               pd_addr += rel_pd_idx;
+
+               /* Add the backing page physical address in the pd entry */
+               i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
+                           I40E_NONDMA_TO_DMA);
+
+               pd_entry->sd_index = sd_idx;
+               pd_entry->valid = true;
+               I40E_INC_PD_REFCNT(pd_table);
+       }
+       I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+       return ret_code;
+}
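
The page_desc = page->pa | 0x1 step works because the backing page is 4K-aligned: the low 12 bits of the physical address are guaranteed zero, leaving bit 0 free to carry the valid flag. A standalone sketch of the encode/decode:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t pa = 0x12345000ULL;   /* 4K-aligned physical address */
            uint64_t desc = pa | 0x1;      /* set the valid bit */

            printf("valid=%llu pa=%#llx\n",
                   (unsigned long long)(desc & 0x1),
                   (unsigned long long)(desc & ~0xfffULL)); /* strip flags */
            return 0;
    }
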
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ *     1. Marks the entry in the pd table (for paged address mode) or in the
+ *        sd table (for direct address mode) invalid.
+ *     2. Writes to register PMPDINV to invalidate the backing page in the
+ *        FV cache
+ *     3. Decrements the ref count for the pd_entry
+ * assumptions:
+ *     1. Caller can deallocate the memory used by backing storage after this
+ *        function returns.
+ **/
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+                                       struct i40e_hmc_info *hmc_info,
+                                       u32 idx)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       struct i40e_hmc_pd_entry *pd_entry;
+       struct i40e_hmc_pd_table *pd_table;
+       struct i40e_hmc_sd_entry *sd_entry;
+       u32 sd_idx, rel_pd_idx;
+       u64 *pd_addr;
+
+       /* calculate index */
+       sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+       rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+       if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+               ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+               hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
+               goto exit;
+       }
+       sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+       if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+               ret_code = I40E_ERR_INVALID_SD_TYPE;
+               hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
+               goto exit;
+       }
+       /* get the entry and decrease its ref counter */
+       pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+       pd_entry = &pd_table->pd_entry[rel_pd_idx];
+       I40E_DEC_BP_REFCNT(&pd_entry->bp);
+       if (pd_entry->bp.ref_cnt)
+               goto exit;
+
+       /* mark the entry invalid */
+       pd_entry->valid = false;
+       I40E_DEC_PD_REFCNT(pd_table);
+       pd_addr = (u64 *)pd_table->pd_page_addr.va;
+       pd_addr += rel_pd_idx;
+       i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
+       I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+
+       /* free memory here */
+       if (!pd_entry->rsrc_pg)
+               ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+       if (I40E_SUCCESS != ret_code)
+               goto exit;
+       if (!pd_table->ref_cnt)
+               i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+       return ret_code;
+}
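
The teardown order above matters: drop the reference, and only on the last put mark the entry invalid, zero the HW-visible descriptor, invalidate the HW cache via PMPDINV, and then free the DMA memory; freeing first would leave the hardware a dangling pointer. A minimal sketch of the refcount-gated teardown shape:

    #include <stdio.h>

    struct bp { int ref_cnt; int valid; };

    /* drop one reference; tear down only when the last user is gone */
    static void put_bp(struct bp *bp)
    {
            if (--bp->ref_cnt)
                    return;     /* still referenced elsewhere */
            bp->valid = 0;      /* then: zero descriptor, invalidate HW
                                 * cache, free backing memory -- in order */
    }

    int main(void)
    {
            struct bp b = { 2, 1 };
            put_bp(&b);         /* ref drops to 1, entry stays valid */
            put_bp(&b);         /* last reference, teardown runs */
            printf("ref=%d valid=%d\n", b.ref_cnt, b.valid);
            return 0;
    }
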
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from an sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+                                            u32 idx)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       struct i40e_hmc_sd_entry *sd_entry;
+
+       /* get the entry and decrease its ref counter */
+       sd_entry = &hmc_info->sd_table.sd_entry[idx];
+       I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+       if (sd_entry->u.bp.ref_cnt) {
+               ret_code = I40E_ERR_NOT_READY;
+               goto exit;
+       }
+       I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+       /* mark the entry invalid */
+       sd_entry->valid = false;
+exit:
+       return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+                                           struct i40e_hmc_info *hmc_info,
+                                           u32 idx, bool is_pf)
+{
+       struct i40e_hmc_sd_entry *sd_entry;
+
+       if (!is_pf)
+               return I40E_NOT_SUPPORTED;
+
+       /* get the entry and decrease its ref counter */
+       sd_entry = &hmc_info->sd_table.sd_entry[idx];
+       I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+       return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from an sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+                                              u32 idx)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       struct i40e_hmc_sd_entry *sd_entry;
+
+       sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+       if (sd_entry->u.pd_table.ref_cnt) {
+               ret_code = I40E_ERR_NOT_READY;
+               goto exit;
+       }
+
+       /* mark the entry invalid */
+       sd_entry->valid = false;
+
+       I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+       return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from an sd entry
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+                                             struct i40e_hmc_info *hmc_info,
+                                             u32 idx, bool is_pf)
+{
+       struct i40e_hmc_sd_entry *sd_entry;
+
+       if (!is_pf)
+               return I40E_NOT_SUPPORTED;
+
+       sd_entry = &hmc_info->sd_table.sd_entry[idx];
+       I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+       return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+}
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_hmc.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_hmc.h
new file mode 100644 (file)
index 0000000..ccfc19c
--- /dev/null
@@ -0,0 +1,238 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE                0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD          512
+#define I40E_HMC_DIRECT_BP_SIZE                0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE         4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT   4096
+#define I40E_FIRST_VF_FPM_ID           16
+
+struct i40e_hmc_obj_info {
+       u64 base;       /* base addr in FPM */
+       u32 max_cnt;    /* max count available for this hmc func */
+       u32 cnt;        /* count of objects driver actually wants to create */
+       u64 size;       /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+       I40E_SD_TYPE_INVALID = 0,
+       I40E_SD_TYPE_PAGED   = 1,
+       I40E_SD_TYPE_DIRECT  = 2
+};
+
+struct i40e_hmc_bp {
+       enum i40e_sd_entry_type entry_type;
+       struct i40e_dma_mem addr; /* populate to be used by hw */
+       u32 sd_pd_index;
+       u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+       struct i40e_hmc_bp bp;
+       u32 sd_index;
+       bool rsrc_pg;
+       bool valid;
+};
+
+struct i40e_hmc_pd_table {
+       struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+       struct i40e_hmc_pd_entry  *pd_entry; /* [512] for sw book keeping */
+       struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+       u32 ref_cnt;
+       u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+       enum i40e_sd_entry_type entry_type;
+       bool valid;
+
+       union {
+               struct i40e_hmc_pd_table pd_table;
+               struct i40e_hmc_bp bp;
+       } u;
+};
+
+struct i40e_hmc_sd_table {
+       struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+       u32 sd_cnt;
+       u32 ref_cnt;
+       struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+       u32 signature;
+       /* equal to the PCI function number for the PF; dynamically allocated for VFs */
+       u8 hmc_fn_id;
+       u16 first_sd_index; /* index of the first available SD */
+
+       /* hmc objects */
+       struct i40e_hmc_obj_info *hmc_obj;
+       struct i40e_virt_mem hmc_obj_virt_mem;
+       struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table)   ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table)   ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp)         ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table)   ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table)   ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp)         ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type)                   \
+{                                                                      \
+       u32 val1, val2, val3;                                           \
+       val1 = (u32)(upper_32_bits(pa));                                \
+       val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT <<                    \
+                I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |              \
+               ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
+               I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |                  \
+               BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);              \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
+       wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);                        \
+       wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
+       wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type)                     \
+{                                                                      \
+       u32 val2, val3;                                                 \
+       val2 = (I40E_HMC_MAX_BP_COUNT <<                                \
+               I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |               \
+               ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
+               I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);                   \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
+       wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);                           \
+       wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
+       wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx)                  \
+       wr32((hw), I40E_PFHMC_PDINV,                                    \
+           (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |             \
+            ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the exclusive segment descriptor index limit
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{                                                                      \
+       u64 fpm_addr, fpm_limit;                                        \
+       fpm_addr = (hmc_info)->hmc_obj[(type)].base +                   \
+                  (hmc_info)->hmc_obj[(type)].size * (index);          \
+       fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+       *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE);          \
+       *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+       /* add one more to the limit to correct our range */            \
+       *(sd_limit) += 1;                                               \
+}
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{                                                                      \
+       u64 fpm_adr, fpm_limit;                                         \
+       fpm_adr = (hmc_info)->hmc_obj[(type)].base +                    \
+                 (hmc_info)->hmc_obj[(type)].size * (idx);             \
+       fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+       *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE);          \
+       *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE);  \
+       /* add one more to the limit to correct our range */            \
+       *(pd_limit) += 1;                                               \
+}
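
Both FIND_*_INDEX_LIMIT macros map an FPM byte range onto descriptor indices, then add one to the limit so it can be used as an exclusive upper bound. A worked standalone example of the SD variant, assuming a hypothetical object layout with base 0 and 0x1000 bytes per object:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t base = 0, obj_size = 0x1000;  /* assumed object layout */
            uint32_t index = 500, cnt = 24;        /* objects 500..523 */
            uint64_t sd_size = 0x200000;           /* I40E_HMC_DIRECT_BP_SIZE */

            uint64_t fpm_addr  = base + obj_size * index;
            uint64_t fpm_limit = fpm_addr + obj_size * cnt;
            uint32_t sd_idx    = (uint32_t)(fpm_addr / sd_size);
            uint32_t sd_limit  = (uint32_t)((fpm_limit - 1) / sd_size) + 1;

            /* bytes 0x1f4000..0x20bfff span SDs 0 and 1 -> idx 0, limit 2 */
            printf("sd_idx=%u sd_limit=%u\n", sd_idx, sd_limit);
            return 0;
    }
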
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+                                             struct i40e_hmc_info *hmc_info,
+                                             u32 sd_index,
+                                             enum i40e_sd_entry_type type,
+                                             u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+                                             struct i40e_hmc_info *hmc_info,
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+                                       struct i40e_hmc_info *hmc_info,
+                                       u32 idx);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+                                            u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+                                           struct i40e_hmc_info *hmc_info,
+                                           u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+                                              u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+                                             struct i40e_hmc_info *hmc_info,
+                                             u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_lan_hmc.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_lan_hmc.c
new file mode 100644 (file)
index 0000000..96ca9e5
--- /dev/null
@@ -0,0 +1,1404 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+static u64 i40e_align_l2obj_base(u64 offset)
+{
+       u64 aligned_offset = offset;
+
+       if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+               aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+                                  (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+       return aligned_offset;
+}
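
Assuming I40E_HMC_L2OBJ_BASE_ALIGNMENT is 512 (consistent with the divide-by-512 register programming later in this file), the helper rounds an offset up to the next 512-byte boundary. A standalone check:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t align_l2obj_base(uint64_t offset, uint64_t a)
    {
            if (offset % a)
                    offset += a - (offset % a);
            return offset;
    }

    int main(void)
    {
            /* 0x1234 is not 512-byte aligned; the next boundary is 0x1400 */
            printf("%#llx\n", (unsigned long long)align_l2obj_base(0x1234, 512));
            return 0;
    }
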
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory for the function required, based
+ * on the number of resources it must provide context for.
+ **/
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+                             u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+       u64 fpm_size = 0;
+
+       fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+       fpm_size = i40e_align_l2obj_base(fpm_size);
+
+       fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+       fpm_size = i40e_align_l2obj_base(fpm_size);
+
+       fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+       fpm_size = i40e_align_l2obj_base(fpm_size);
+
+       fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+       fpm_size = i40e_align_l2obj_base(fpm_size);
+
+       return fpm_size;
+}
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ *   - HMC Resource Profile has been selected before calling this function.
+ **/
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+                                       u32 rxq_num, u32 fcoe_cntx_num,
+                                       u32 fcoe_filt_num)
+{
+       struct i40e_hmc_obj_info *obj, *full_obj;
+       i40e_status ret_code = I40E_SUCCESS;
+       u64 l2fpm_size;
+       u32 size_exp;
+
+       hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+       hw->hmc.hmc_fn_id = hw->pf_id;
+
+       /* allocate memory for hmc_obj */
+       ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+                       sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+       if (ret_code)
+               goto init_lan_hmc_out;
+       hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+                         hw->hmc.hmc_obj_virt_mem.va;
+
+       /* The full object will be used to create the LAN HMC SD */
+       full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+       full_obj->max_cnt = 0;
+       full_obj->cnt = 0;
+       full_obj->base = 0;
+       full_obj->size = 0;
+
+       /* Tx queue context information */
+       obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+       obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+       obj->cnt = txq_num;
+       obj->base = 0;
+       size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+       obj->size = BIT_ULL(size_exp);
+
+       /* validate values requested by driver don't exceed HMC capacity */
+       if (txq_num > obj->max_cnt) {
+               ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+               hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+                         txq_num, obj->max_cnt, ret_code);
+               goto init_lan_hmc_out;
+       }
+
+       /* aggregate values into the full LAN object for later */
+       full_obj->max_cnt += obj->max_cnt;
+       full_obj->cnt += obj->cnt;
+
+       /* Rx queue context information */
+       obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+       obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+       obj->cnt = rxq_num;
+       obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+                   (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+                    hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+       obj->base = i40e_align_l2obj_base(obj->base);
+       size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+       obj->size = BIT_ULL(size_exp);
+
+       /* validate values requested by driver don't exceed HMC capacity */
+       if (rxq_num > obj->max_cnt) {
+               ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+               hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+                         rxq_num, obj->max_cnt, ret_code);
+               goto init_lan_hmc_out;
+       }
+
+       /* aggregate values into the full LAN object for later */
+       full_obj->max_cnt += obj->max_cnt;
+       full_obj->cnt += obj->cnt;
+
+       /* FCoE context information */
+       obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+       obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+       obj->cnt = fcoe_cntx_num;
+       obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+                   (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+                    hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+       obj->base = i40e_align_l2obj_base(obj->base);
+       size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+       obj->size = BIT_ULL(size_exp);
+
+       /* validate values requested by driver don't exceed HMC capacity */
+       if (fcoe_cntx_num > obj->max_cnt) {
+               ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+               hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+                         fcoe_cntx_num, obj->max_cnt, ret_code);
+               goto init_lan_hmc_out;
+       }
+
+       /* aggregate values into the full LAN object for later */
+       full_obj->max_cnt += obj->max_cnt;
+       full_obj->cnt += obj->cnt;
+
+       /* FCoE filter information */
+       obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+       obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+       obj->cnt = fcoe_filt_num;
+       obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+                   (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+                    hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+       obj->base = i40e_align_l2obj_base(obj->base);
+       size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+       obj->size = BIT_ULL(size_exp);
+
+       /* validate values requested by driver don't exceed HMC capacity */
+       if (fcoe_filt_num > obj->max_cnt) {
+               ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+               hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+                         fcoe_filt_num, obj->max_cnt, ret_code);
+               goto init_lan_hmc_out;
+       }
+
+       /* aggregate values into the full LAN object for later */
+       full_obj->max_cnt += obj->max_cnt;
+       full_obj->cnt += obj->cnt;
+
+       hw->hmc.first_sd_index = 0;
+       hw->hmc.sd_table.ref_cnt = 0;
+       l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+                                              fcoe_filt_num);
+       if (NULL == hw->hmc.sd_table.sd_entry) {
+               hw->hmc.sd_table.sd_cnt = (u32)
+                                  (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+                                  I40E_HMC_DIRECT_BP_SIZE;
+
+               /* allocate the sd_entry members in the sd_table */
+               ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+                                         (sizeof(struct i40e_hmc_sd_entry) *
+                                         hw->hmc.sd_table.sd_cnt));
+               if (ret_code)
+                       goto init_lan_hmc_out;
+               hw->hmc.sd_table.sd_entry =
+                       (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+       }
+       /* store in the LAN full object for later */
+       full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+       return ret_code;
+}
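
The sd_cnt computation above is a ceiling division of the total FPM footprint by the 2M direct backing-page size, open-coded as (size + 2M - 1) / 2M. A quick standalone check with an assumed 5 MiB footprint:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t bp = 0x200000;              /* I40E_HMC_DIRECT_BP_SIZE */
            uint64_t l2fpm_size = 5 * 0x100000;  /* assumed 5 MiB of context */

            /* 5 MiB needs 3 segment descriptors of 2 MiB each */
            printf("sd_cnt=%llu\n",
                   (unsigned long long)((l2fpm_size + bp - 1) / bp));
            return 0;
    }
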
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ *     1. Marks the entry in the pd table (for paged address mode) invalid
+ *     2. Writes to register PMPDINV to invalidate the backing page in the
+ *        FV cache
+ *     3. Decrements the ref count for the pd_entry
+ * assumptions:
+ *     1. The caller can deallocate the memory used by the pd after this
+ *        function returns.
+ **/
+static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
+                                                struct i40e_hmc_info *hmc_info,
+                                                u32 idx)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+
+       if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
+               ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+
+       return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ *     1. Marks the entry in the sd table (for direct address mode) invalid
+ *     2. Writes to registers PMSDCMD, PMSDDATALOW (PMSDDATALOW.PMSDVALID set
+ *        to 0) and PMSDDATAHIGH to invalidate the sd page
+ *     3. Decrements the ref count for the sd_entry
+ * assumptions:
+ *     1. The caller can deallocate the memory used by the backing storage
+ *        after this function returns.
+ **/
+static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
+                                              struct i40e_hmc_info *hmc_info,
+                                              u32 idx)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+
+       if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
+               ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+
+       return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
+                               struct i40e_hmc_lan_create_obj_info *info)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       struct i40e_hmc_sd_entry *sd_entry;
+       u32 pd_idx1 = 0, pd_lmt1 = 0;
+       u32 pd_idx = 0, pd_lmt = 0;
+       bool pd_error = false;
+       u32 sd_idx, sd_lmt;
+       u64 sd_size;
+       u32 i, j;
+
+       if (NULL == info) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
+               goto exit;
+       }
+       if (NULL == info->hmc_info) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+               goto exit;
+       }
+       if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
+               goto exit;
+       }
+
+       if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+               ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+               hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+                         ret_code);
+               goto exit;
+       }
+       if ((info->start_idx + info->count) >
+           info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+               ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+               hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+                         ret_code);
+               goto exit;
+       }
+
+       /* find sd index and limit */
+       I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+                                info->start_idx, info->count,
+                                &sd_idx, &sd_lmt);
+       if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+           sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+               ret_code = I40E_ERR_INVALID_SD_INDEX;
+               goto exit;
+       }
+       /* find pd index */
+       I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+                                info->start_idx, info->count, &pd_idx,
+                                &pd_lmt);
+
+       /* This is to cover for cases where you may not want to have an SD with
+        * the full 2M memory but something smaller. By not filling out any
+        * size, the function will default the SD size to be 2M.
+        */
+       if (info->direct_mode_sz == 0)
+               sd_size = I40E_HMC_DIRECT_BP_SIZE;
+       else
+               sd_size = info->direct_mode_sz;
+
+       /* check if all the sds are valid. If not, allocate a page and
+        * initialize it.
+        */
+       for (j = sd_idx; j < sd_lmt; j++) {
+               /* update the sd table entry */
+               ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+                                                  info->entry_type,
+                                                  sd_size);
+               if (I40E_SUCCESS != ret_code)
+                       goto exit_sd_error;
+               sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+               if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+                       /* check if all the pds in this sd are valid. If not,
+                        * allocate a page and initialize it.
+                        */
+
+                       /* find pd_idx and pd_lmt in this sd */
+                       pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+                       pd_lmt1 = min(pd_lmt,
+                                     ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+                       for (i = pd_idx1; i < pd_lmt1; i++) {
+                               /* update the pd table entry */
+                               ret_code = i40e_add_pd_table_entry(hw,
+                                                               info->hmc_info,
+                                                               i, NULL);
+                               if (I40E_SUCCESS != ret_code) {
+                                       pd_error = true;
+                                       break;
+                               }
+                       }
+                       if (pd_error) {
+                               /* remove the backing pages from pd_idx1 to i */
+                               while (i && (i > pd_idx1)) {
+                                       i40e_remove_pd_bp(hw, info->hmc_info,
+                                                         (i - 1));
+                                       i--;
+                               }
+                       }
+               }
+               if (!sd_entry->valid) {
+                       sd_entry->valid = true;
+                       switch (sd_entry->entry_type) {
+                       case I40E_SD_TYPE_PAGED:
+                               I40E_SET_PF_SD_ENTRY(hw,
+                                       sd_entry->u.pd_table.pd_page_addr.pa,
+                                       j, sd_entry->entry_type);
+                               break;
+                       case I40E_SD_TYPE_DIRECT:
+                               I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+                                                    j, sd_entry->entry_type);
+                               break;
+                       default:
+                               ret_code = I40E_ERR_INVALID_SD_TYPE;
+                               goto exit;
+                       }
+               }
+       }
+       goto exit;
+
+exit_sd_error:
+       /* cleanup for sd entries from j to sd_idx */
+       while (j && (j > sd_idx)) {
+               sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+               switch (sd_entry->entry_type) {
+               case I40E_SD_TYPE_PAGED:
+                       pd_idx1 = max(pd_idx,
+                                     ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+                       pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+                       for (i = pd_idx1; i < pd_lmt1; i++)
+                               i40e_remove_pd_bp(hw, info->hmc_info, i);
+                       i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+                       break;
+               case I40E_SD_TYPE_DIRECT:
+                       i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+                       break;
+               default:
+                       ret_code = I40E_ERR_INVALID_SD_TYPE;
+                       break;
+               }
+               j--;
+       }
+exit:
+       return ret_code;
+}
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ *   any LAN/FCoE HMC objects can be created.
+ **/
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+                                            enum i40e_hmc_model model)
+{
+       struct i40e_hmc_lan_create_obj_info info;
+       u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+       struct i40e_hmc_obj_info *obj;
+       i40e_status ret_code = I40E_SUCCESS;
+
+       /* Initialize part of the create object info struct */
+       info.hmc_info = &hw->hmc;
+       info.rsrc_type = I40E_HMC_LAN_FULL;
+       info.start_idx = 0;
+       info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+       /* Build the SD entry for the LAN objects */
+       switch (model) {
+       case I40E_HMC_MODEL_DIRECT_PREFERRED:
+       case I40E_HMC_MODEL_DIRECT_ONLY:
+               info.entry_type = I40E_SD_TYPE_DIRECT;
+               /* Make one big object, a single SD */
+               info.count = 1;
+               ret_code = i40e_create_lan_hmc_object(hw, &info);
+               if ((ret_code != I40E_SUCCESS) &&
+                   (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+                       goto try_type_paged;
+               else if (ret_code != I40E_SUCCESS)
+                       goto configure_lan_hmc_out;
+               /* on success, fall through to the break */
+               break;
+       case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+               info.entry_type = I40E_SD_TYPE_PAGED;
+               /* Make one big object in the PD table */
+               info.count = 1;
+               ret_code = i40e_create_lan_hmc_object(hw, &info);
+               if (ret_code != I40E_SUCCESS)
+                       goto configure_lan_hmc_out;
+               break;
+       default:
+               /* unsupported type */
+               ret_code = I40E_ERR_INVALID_SD_TYPE;
+               hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
+                         ret_code);
+               goto configure_lan_hmc_out;
+       }
+
+       /* Configure and program the FPM registers so objects can be created */
+
+       /* Tx contexts */
+       obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+       wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+            (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+       wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+       /* Rx contexts */
+       obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+       wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+            (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+       wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+       /* FCoE contexts */
+       obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+       wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+        (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+       wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+       /* FCoE filters */
+       obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+       wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+            (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+       wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+       return ret_code;
+}
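
The *BASE registers above take the object base in 512-byte units, hence the masked divide by 512 at each wr32(). A standalone sketch of the conversion and its inverse:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t base = 0x4000;                /* 512-byte-aligned FPM base */
            uint32_t reg = (uint32_t)(base / 512); /* value programmed into HW */

            printf("reg=%u (restores to %#llx)\n",
                   reg, (unsigned long long)((uint64_t)reg * 512));
            return 0;
    }
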
+
+/**
+ * i40e_delete_lan_hmc_object - remove hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs.  It frees the memory for the PDs
+ * and backing storage.  After this function returns, the caller should
+ * deallocate the memory previously allocated for book-keeping information
+ * about the PDs and backing storage.
+ **/
+i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+                               struct i40e_hmc_lan_delete_obj_info *info)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       struct i40e_hmc_pd_table *pd_table;
+       u32 pd_idx, pd_lmt, rel_pd_idx;
+       u32 sd_idx, sd_lmt;
+       u32 i, j;
+
+       if (NULL == info) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
+               goto exit;
+       }
+       if (NULL == info->hmc_info) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+               goto exit;
+       }
+       if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
+               goto exit;
+       }
+
+       if (NULL == info->hmc_info->sd_table.sd_entry) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
+               goto exit;
+       }
+
+       if (NULL == info->hmc_info->hmc_obj) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+               goto exit;
+       }
+       if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+               ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+               hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+                         ret_code);
+               goto exit;
+       }
+
+       if ((info->start_idx + info->count) >
+           info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+               ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+               hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+                         ret_code);
+               goto exit;
+       }
+
+       I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+                                info->start_idx, info->count, &pd_idx,
+                                &pd_lmt);
+
+       for (j = pd_idx; j < pd_lmt; j++) {
+               sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+               if (I40E_SD_TYPE_PAGED !=
+                   info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+                       continue;
+
+               rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+               pd_table =
+                       &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+               if (pd_table->pd_entry[rel_pd_idx].valid) {
+                       ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
+                       if (I40E_SUCCESS != ret_code)
+                               goto exit;
+               }
+       }
+
+       /* find sd index and limit */
+       I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+                                info->start_idx, info->count,
+                                &sd_idx, &sd_lmt);
+       if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+           sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+               ret_code = I40E_ERR_INVALID_SD_INDEX;
+               goto exit;
+       }
+
+       for (i = sd_idx; i < sd_lmt; i++) {
+               if (!info->hmc_info->sd_table.sd_entry[i].valid)
+                       continue;
+               switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+               case I40E_SD_TYPE_DIRECT:
+                       ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+                       if (I40E_SUCCESS != ret_code)
+                               goto exit;
+                       break;
+               case I40E_SD_TYPE_PAGED:
+                       ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+                       if (I40E_SUCCESS != ret_code)
+                               goto exit;
+                       break;
+               default:
+                       break;
+               }
+       }
+exit:
+       return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+       struct i40e_hmc_lan_delete_obj_info info;
+       i40e_status ret_code;
+
+       info.hmc_info = &hw->hmc;
+       info.rsrc_type = I40E_HMC_LAN_FULL;
+       info.start_idx = 0;
+       info.count = 1;
+
+       /* delete the object */
+       ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+       /* free the SD table entry for LAN */
+       i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+       hw->hmc.sd_table.sd_cnt = 0;
+       hw->hmc.sd_table.sd_entry = NULL;
+
+       /* free memory used for hmc_obj */
+       i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+       hw->hmc.hmc_obj = NULL;
+
+       return ret_code;
+}
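+
+/* Teardown mirrors bring-up: the single I40E_HMC_LAN_FULL object deleted
+ * here (count = 1) is the same one that i40e_configure_lan_hmc() created,
+ * so one delete call drops the whole backing store before the SD table
+ * and hmc_obj book-keeping are freed.
+ */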
+
+#define I40E_HMC_STORE(_struct, _ele)          \
+       offsetof(struct _struct, _ele),         \
+       FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+       u16 offset;
+       u16 size_of;
+       u16 width;
+       u16 lsb;
+};
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+                                            /* Field      Width    LSB */
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
+/* line 1 */
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
+/* line 7 */
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
+       {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
+       { 0 }
+};
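+
+/* Reading one row of the table above, worked through: the Tx context is
+ * laid out in 128-bit "lines", so qlen's LSB of "33 + 128" is absolute
+ * bit 161.  The write helpers below use lsb / 8 = byte 20 and
+ * lsb % 8 = bit 1 within it, and the 13-bit value lands in bytes 20-21
+ * of the raw context image.
+ */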
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+                                        /* Field      Width    LSB */
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,    0   },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,    13  },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,    32  },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,    89  },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,    102 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,    109 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,    114 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,    116 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,    117 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,    118 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,    119 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,    120 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,    124 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,    127 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,    174 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,    193 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,    194 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,    195 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,    196 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,    198 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,    201 },
+       { 0 }
+};
+
+/**
+ * i40e_write_byte - replace HMC context byte
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_byte(u8 *hmc_bits,
+                           struct i40e_context_ele *ce_info,
+                           u8 *src)
+{
+       u8 src_byte, dest_byte, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+       mask = BIT(ce_info->width) - 1;
+
+       src_byte = *from;
+       src_byte &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_byte <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+       dest_byte &= ~mask;     /* get the bits not changing */
+       dest_byte |= src_byte;  /* add in the new bits */
+
+       /* put it all back */
+       i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
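+
+/* A concrete trace of the helper above, using the Rx dtype field
+ * (width 2, LSB 114) as an example: shift_width = 114 % 8 = 2 and
+ * mask = 0x03 << 2 = 0x0c, so a dtype of 2 becomes src_byte = 0x08;
+ * bits 0x0c of context byte 114 / 8 = 14 are cleared and the new value
+ * ORed in, leaving the neighbouring fields of that byte untouched.
+ */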
+
+/**
+ * i40e_write_word - replace HMC context word
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_word(u8 *hmc_bits,
+                           struct i40e_context_ele *ce_info,
+                           u8 *src)
+{
+       u16 src_word, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+       __le16 dest_word;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+       mask = BIT(ce_info->width) - 1;
+
+       /* don't swizzle the bits until after the mask because the mask bits
+        * will be in a different bit position on big endian machines
+        */
+       src_word = *(u16 *)from;
+       src_word &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_word <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
+
+       dest_word &= ~(CPU_TO_LE16(mask));      /* get the bits not changing */
+       dest_word |= CPU_TO_LE16(src_word);     /* add in the new bits */
+
+       /* put it all back */
+       i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_dword - replace HMC context dword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_dword(u8 *hmc_bits,
+                            struct i40e_context_ele *ce_info,
+                            u8 *src)
+{
+       u32 src_dword, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+       __le32 dest_dword;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+
+       /* if the field width is exactly 32 on an x86 machine, then the shift
+        * operation would not work because the SHL instruction's shift count
+        * is masked to 5 bits, so the shift would do nothing
+        */
+       if (ce_info->width < 32)
+               mask = BIT(ce_info->width) - 1;
+       else
+               mask = ~(u32)0;
+
+       /* don't swizzle the bits until after the mask because the mask bits
+        * will be in a different bit position on big endian machines
+        */
+       src_dword = *(u32 *)from;
+       src_dword &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_dword <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
+
+       dest_dword &= ~(CPU_TO_LE32(mask));     /* get the bits not changing */
+       dest_dword |= CPU_TO_LE32(src_dword);   /* add in the new bits */
+
+       /* put it all back */
+       i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_qword - replace HMC context qword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_qword(u8 *hmc_bits,
+                            struct i40e_context_ele *ce_info,
+                            u8 *src)
+{
+       u64 src_qword, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+       __le64 dest_qword;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+
+       /* if the field width is exactly 64 on an x86 machine, then the shift
+        * operation would not work because the SHL instruction's shift count
+        * is masked to 6 bits, so the shift would do nothing
+        */
+       if (ce_info->width < 64)
+               mask = BIT_ULL(ce_info->width) - 1;
+       else
+               mask = ~(u64)0;
+
+       /* don't swizzle the bits until after the mask because the mask bits
+        * will be in a different bit position on big endian machines
+        */
+       src_qword = *(u64 *)from;
+       src_qword &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_qword <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
+
+       dest_qword &= ~(CPU_TO_LE64(mask));     /* get the bits not changing */
+       dest_qword |= CPU_TO_LE64(src_qword);   /* add in the new bits */
+
+       /* put it all back */
+       i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_byte - read HMC context byte into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_byte(u8 *hmc_bits,
+                          struct i40e_context_ele *ce_info,
+                          u8 *dest)
+{
+       u8 dest_byte, mask;
+       u8 *src, *target;
+       u16 shift_width;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+       mask = BIT(ce_info->width) - 1;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+
+       /* get the current bits from the src bit string */
+       src = hmc_bits + (ce_info->lsb / 8);
+
+       i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+       dest_byte &= mask;      /* keep only this field's bits */
+
+       dest_byte >>= shift_width;
+
+       /* get the address from the struct field */
+       target = dest + ce_info->offset;
+
+       /* put it back in the struct */
+       i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_word - read HMC context word into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_word(u8 *hmc_bits,
+                          struct i40e_context_ele *ce_info,
+                          u8 *dest)
+{
+       u16 dest_word, mask;
+       u8 *src, *target;
+       u16 shift_width;
+       __le16 src_word;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+       mask = BIT(ce_info->width) - 1;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+
+       /* get the current bits from the src bit string */
+       src = hmc_bits + (ce_info->lsb / 8);
+
+       i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
+
+       /* the data in the memory is stored as little endian so mask it
+        * correctly
+        */
+       src_word &= CPU_TO_LE16(mask);
+
+       /* get the data back into host order before shifting */
+       dest_word = LE16_TO_CPU(src_word);
+
+       dest_word >>= shift_width;
+
+       /* get the address from the struct field */
+       target = dest + ce_info->offset;
+
+       /* put it back in the struct */
+       i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_dword - read HMC context dword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_dword(u8 *hmc_bits,
+                           struct i40e_context_ele *ce_info,
+                           u8 *dest)
+{
+       u32 dest_dword, mask;
+       u8 *src, *target;
+       u16 shift_width;
+       __le32 src_dword;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+
+       /* if the field width is exactly 32 on an x86 machine, then the shift
+        * operation would not work because the SHL instruction's shift count
+        * is masked to 5 bits, so the shift would do nothing
+        */
+       if (ce_info->width < 32)
+               mask = BIT(ce_info->width) - 1;
+       else
+               mask = ~(u32)0;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+
+       /* get the current bits from the src bit string */
+       src = hmc_bits + (ce_info->lsb / 8);
+
+       i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
+
+       /* the data in the memory is stored as little endian so mask it
+        * correctly
+        */
+       src_dword &= CPU_TO_LE32(mask);
+
+       /* get the data back into host order before shifting */
+       dest_dword = LE32_TO_CPU(src_dword);
+
+       dest_dword >>= shift_width;
+
+       /* get the address from the struct field */
+       target = dest + ce_info->offset;
+
+       /* put it back in the struct */
+       i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
+                   I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_qword - read HMC context qword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_qword(u8 *hmc_bits,
+                           struct i40e_context_ele *ce_info,
+                           u8 *dest)
+{
+       u64 dest_qword, mask;
+       u8 *src, *target;
+       u16 shift_width;
+       __le64 src_qword;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+
+       /* if the field width is exactly 64 on an x86 machine, then the shift
+        * operation would not work because the SHL instruction's shift count
+        * is masked to 6 bits, so the shift would do nothing
+        */
+       if (ce_info->width < 64)
+               mask = BIT_ULL(ce_info->width) - 1;
+       else
+               mask = ~(u64)0;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+
+       /* get the current bits from the src bit string */
+       src = hmc_bits + (ce_info->lsb / 8);
+
+       i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
+
+       /* the data in the memory is stored as little endian so mask it
+        * correctly
+        */
+       src_qword &= CPU_TO_LE64(mask);
+
+       /* get the data back into host order before shifting */
+       dest_qword = LE64_TO_CPU(src_qword);
+
+       dest_qword >>= shift_width;
+
+       /* get the address from the struct field */
+       target = dest + ce_info->offset;
+
+       /* put it back in the struct */
+       i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
+                   I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_get_hmc_context - extract HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static i40e_status i40e_get_hmc_context(u8 *context_bytes,
+                                       struct i40e_context_ele *ce_info,
+                                       u8 *dest)
+{
+       int f;
+
+       for (f = 0; ce_info[f].width != 0; f++) {
+               switch (ce_info[f].size_of) {
+               case 1:
+                       i40e_read_byte(context_bytes, &ce_info[f], dest);
+                       break;
+               case 2:
+                       i40e_read_word(context_bytes, &ce_info[f], dest);
+                       break;
+               case 4:
+                       i40e_read_dword(context_bytes, &ce_info[f], dest);
+                       break;
+               case 8:
+                       i40e_read_qword(context_bytes, &ce_info[f], dest);
+                       break;
+               default:
+                       /* nothing to do, just keep going */
+                       break;
+               }
+       }
+
+       return I40E_SUCCESS;
+}
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw:       the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
+                                       u8 *context_bytes,
+                                       enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+       /* clean the bit array */
+       i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
+                   I40E_DMA_MEM);
+
+       return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info:  a description of the struct to be filled
+ * @dest:     the struct to be filled
+ **/
+static i40e_status i40e_set_hmc_context(u8 *context_bytes,
+                                       struct i40e_context_ele *ce_info,
+                                       u8 *dest)
+{
+       int f;
+
+       for (f = 0; ce_info[f].width != 0; f++) {
+
+               /* we have to deal with each element of the HMC using the
+                * correct size so that we are correct regardless of the
+                * endianness of the machine
+                */
+               switch (ce_info[f].size_of) {
+               case 1:
+                       i40e_write_byte(context_bytes, &ce_info[f], dest);
+                       break;
+               case 2:
+                       i40e_write_word(context_bytes, &ce_info[f], dest);
+                       break;
+               case 4:
+                       i40e_write_dword(context_bytes, &ce_info[f], dest);
+                       break;
+               case 8:
+                       i40e_write_qword(context_bytes, &ce_info[f], dest);
+                       break;
+               }
+       }
+
+       return I40E_SUCCESS;
+}
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hw: pointer to the hw structure
+ * @object_base: pointer to u64 to get the va
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer.  This function is used for LAN Queue contexts.
+ **/
+static
+i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw,
+                                       u8 **object_base,
+                                       enum i40e_hmc_lan_rsrc_type rsrc_type,
+                                       u32 obj_idx)
+{
+       u32 obj_offset_in_sd, obj_offset_in_pd;
+       struct i40e_hmc_info     *hmc_info = &hw->hmc;
+       struct i40e_hmc_sd_entry *sd_entry;
+       struct i40e_hmc_pd_entry *pd_entry;
+       u32 pd_idx, pd_lmt, rel_pd_idx;
+       i40e_status ret_code = I40E_SUCCESS;
+       u64 obj_offset_in_fpm;
+       u32 sd_idx, sd_lmt;
+
+       if (NULL == hmc_info) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
+               goto exit;
+       }
+       if (NULL == hmc_info->hmc_obj) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+               goto exit;
+       }
+       if (NULL == object_base) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
+               goto exit;
+       }
+       if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+               ret_code = I40E_ERR_BAD_PTR;
+               hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
+               goto exit;
+       }
+       if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+               ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+               hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
+                         ret_code);
+               goto exit;
+       }
+       /* find sd index and limit */
+       I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+                                &sd_idx, &sd_lmt);
+
+       sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+       obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+                           hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+       if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+               I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+                                        &pd_idx, &pd_lmt);
+               rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+               pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+               obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+                                        I40E_HMC_PAGED_BP_SIZE);
+               *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+       } else {
+               obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+                                        I40E_HMC_DIRECT_BP_SIZE);
+               *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+       }
+exit:
+       return ret_code;
+}
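+
+/* Worked example of the address math above for the 128-byte Tx context
+ * objects, assuming the usual 2MB direct / 4KB paged backing-page sizes
+ * behind I40E_HMC_DIRECT_BP_SIZE and I40E_HMC_PAGED_BP_SIZE:
+ *
+ *     obj_offset_in_fpm = base + 128 * queue;
+ *     direct SD: va = sd_entry->u.bp.addr.va
+ *                     + obj_offset_in_fpm % I40E_HMC_DIRECT_BP_SIZE;
+ *     paged SD:  va = pd_entry->bp.addr.va
+ *                     + obj_offset_in_fpm % I40E_HMC_PAGED_BP_SIZE;
+ */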
+
+/**
+ * i40e_get_lan_tx_queue_context - return the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ * @s:     the struct to be filled
+ **/
+i40e_status i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+                                                   u16 queue,
+                                                   struct i40e_hmc_obj_txq *s)
+{
+       i40e_status err;
+       u8 *context_bytes;
+
+       err = i40e_hmc_get_object_va(hw, &context_bytes,
+                                    I40E_HMC_LAN_TX, queue);
+       if (err < 0)
+               return err;
+
+       return i40e_get_hmc_context(context_bytes,
+                                   i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ **/
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+                                                     u16 queue)
+{
+       i40e_status err;
+       u8 *context_bytes;
+
+       err = i40e_hmc_get_object_va(hw, &context_bytes,
+                                    I40E_HMC_LAN_TX, queue);
+       if (err < 0)
+               return err;
+
+       return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ * @s:     the struct to be filled
+ **/
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+                                                   u16 queue,
+                                                   struct i40e_hmc_obj_txq *s)
+{
+       i40e_status err;
+       u8 *context_bytes;
+
+       err = i40e_hmc_get_object_va(hw, &context_bytes,
+                                    I40E_HMC_LAN_TX, queue);
+       if (err < 0)
+               return err;
+
+       return i40e_set_hmc_context(context_bytes,
+                                   i40e_hmc_txq_ce_info, (u8 *)s);
+}
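+
+/* Typical caller pattern when (re)programming a Tx queue, as a sketch
+ * with tx_ctx filled in from the ring state by the caller:
+ *
+ *     i40e_clear_lan_tx_queue_context(hw, pf_queue);
+ *     ... fill struct i40e_hmc_obj_txq tx_ctx ...
+ *     err = i40e_set_lan_tx_queue_context(hw, pf_queue, &tx_ctx);
+ */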
+
+/**
+ * i40e_get_lan_rx_queue_context - return the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ * @s:     the struct to be filled
+ **/
+i40e_status i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+                                                   u16 queue,
+                                                   struct i40e_hmc_obj_rxq *s)
+{
+       i40e_status err;
+       u8 *context_bytes;
+
+       err = i40e_hmc_get_object_va(hw, &context_bytes,
+                                    I40E_HMC_LAN_RX, queue);
+       if (err < 0)
+               return err;
+
+       return i40e_get_hmc_context(context_bytes,
+                                   i40e_hmc_rxq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ **/
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+                                                     u16 queue)
+{
+       i40e_status err;
+       u8 *context_bytes;
+
+       err = i40e_hmc_get_object_va(hw, &context_bytes,
+                                    I40E_HMC_LAN_RX, queue);
+       if (err < 0)
+               return err;
+
+       return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ * @s:     the struct to be filled
+ **/
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+                                                   u16 queue,
+                                                   struct i40e_hmc_obj_rxq *s)
+{
+       i40e_status err;
+       u8 *context_bytes;
+
+       err = i40e_hmc_get_object_va(hw, &context_bytes,
+                                    I40E_HMC_LAN_RX, queue);
+       if (err < 0)
+               return err;
+
+       return i40e_set_hmc_context(context_bytes,
+                                   i40e_hmc_rxq_ce_info, (u8 *)s);
+}
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_lan_hmc.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_lan_hmc.h
new file mode 100644 (file)
index 0000000..efbf586
--- /dev/null
@@ -0,0 +1,193 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_rxq {
+       u16 head;
+       u16 cpuid; /* bigger than needed, see above for reason */
+       u64 base;
+       u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+       u16 dbuff; /* bigger than needed, see above for reason */
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+       u16 hbuff; /* bigger than needed, see above for reason */
+       u8  dtype;
+       u8  dsize;
+       u8  crcstrip;
+       u8  fc_ena;
+       u8  l2tsel;
+       u8  hsplit_0;
+       u8  hsplit_1;
+       u8  showiv;
+       u32 rxmax; /* bigger than needed, see above for reason */
+       u8  tphrdesc_ena;
+       u8  tphwdesc_ena;
+       u8  tphdata_ena;
+       u8  tphhead_ena;
+       u16 lrxqthresh; /* bigger than needed, see above for reason */
+       u8  prefena;    /* NOTE: normally must be set to 1 at init */
+};
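+
+/* Units for the two shifts above: dbuff and hbuff hold buffer sizes
+ * pre-scaled by the hardware granularity (2^7 = 128B and 2^6 = 64B
+ * chunks), so a caller would convert byte lengths with, e.g.:
+ *
+ *     rx_ctx.dbuff = rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ *     rx_ctx.hbuff = rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
+ */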
+
+/* Tx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_txq {
+       u16 head;
+       u8  new_context;
+       u64 base;
+       u8  fc_ena;
+       u8  timesync_ena;
+       u8  fd_ena;
+       u8  alt_vlan_ena;
+       u16 thead_wb;
+       u8  cpuid;
+       u8  head_wb_ena;
+       u16 qlen;
+       u8  tphrdesc_ena;
+       u8  tphrpacket_ena;
+       u8  tphwdesc_ena;
+       u64 head_wb_addr;
+       u32 crc;
+       u16 rdylist;
+       u8  rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+       I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT      = 0,
+       I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2      = 1,
+       I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP      = 2,
+       I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+       I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP    = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purpose only */
+struct i40e_hmc_obj_fcoe_cntx {
+       u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+       u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+       I40E_HMC_LAN_OBJ_SZ_8   = 0x3,
+       I40E_HMC_LAN_OBJ_SZ_16  = 0x4,
+       I40E_HMC_LAN_OBJ_SZ_32  = 0x5,
+       I40E_HMC_LAN_OBJ_SZ_64  = 0x6,
+       I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+       I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+       I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ         128
+#define I40E_HMC_OBJ_SIZE_RXQ         32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX   64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT   64
+
+enum i40e_hmc_lan_rsrc_type {
+       I40E_HMC_LAN_FULL  = 0,
+       I40E_HMC_LAN_TX    = 1,
+       I40E_HMC_LAN_RX    = 2,
+       I40E_HMC_FCOE_CTX  = 3,
+       I40E_HMC_FCOE_FILT = 4,
+       I40E_HMC_LAN_MAX   = 5
+};
+
+enum i40e_hmc_model {
+       I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+       I40E_HMC_MODEL_DIRECT_ONLY      = 1,
+       I40E_HMC_MODEL_PAGED_ONLY       = 2,
+       I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+       struct i40e_hmc_info *hmc_info;
+       u32 rsrc_type;
+       u32 start_idx;
+       u32 count;
+       enum i40e_sd_entry_type entry_type;
+       u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+       struct i40e_hmc_info *hmc_info;
+       u32 rsrc_type;
+       u32 start_idx;
+       u32 count;
+};
+
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+                                       u32 rxq_num, u32 fcoe_cntx_num,
+                                       u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+                                            enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+                             u32 fcoe_cntx_num, u32 fcoe_filt_num);
+i40e_status i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+                                                   u16 queue,
+                                                   struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+                                                     u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+                                                   u16 queue,
+                                                   struct i40e_hmc_obj_txq *s);
+i40e_status i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+                                                   u16 queue,
+                                                   struct i40e_hmc_obj_rxq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+                                                     u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+                                                   u16 queue,
+                                                   struct i40e_hmc_obj_rxq *s);
+i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
+                               struct i40e_hmc_lan_create_obj_info *info);
+i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+                               struct i40e_hmc_lan_delete_obj_info *info);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_main.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_main.c
new file mode 100644 (file)
index 0000000..fc9b3db
--- /dev/null
@@ -0,0 +1,11748 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Local includes */
+#include "i40e.h"
+#include "i40e_helper.h"
+#include "i40e_diag.h"
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+#ifdef HAVE_VXLAN_CHECKS
+#include <net/vxlan.h>
+#else
+#if IS_ENABLED(CONFIG_VXLAN)
+#include <net/vxlan.h>
+#endif
+#endif /* HAVE_VXLAN_CHECKS */
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
+char i40e_driver_name[] = "i40e";
+static const char i40e_driver_string[] =
+                       "Intel(R) Ethernet Connection XL710 Network Driver";
+
+#define DRV_HW_PERF
+#define DRV_FPGA
+#define DRV_X722
+#define DRV_A0
+#ifdef I40E_MSI_INTERRUPT
+#define DRV_KERN "-msi"
+#else
+#ifdef I40E_LEGACY_INTERRUPT
+#define DRV_KERN "-legacy"
+#else
+#define DRV_KERN
+#endif
+#endif
+
+#define DRV_VERSION_MAJOR 1
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 47
+#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
+            __stringify(DRV_VERSION_MINOR) "." \
+            __stringify(DRV_VERSION_BUILD) DRV_HW_PERF DRV_FPGA DRV_X722 DRV_A0 DRV_KERN
+const char i40e_driver_version_str[] = DRV_VERSION;
+static const char i40e_copyright[] = "Copyright (c) 2013 - 2015 Intel Corporation.";
+
+/* a bit of forward declarations */
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
+static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static int i40e_add_vsi(struct i40e_vsi *vsi);
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
+static int i40e_setup_misc_vector(struct i40e_pf *pf);
+static void i40e_determine_queue_usage(struct i40e_pf *pf);
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_fdir_sb_setup(struct i40e_pf *pf);
+static int i40e_veb_get_bw_info(struct i40e_veb *veb);
+/* i40e_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ *   Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id i40e_pci_tbl[] = {
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
+       /* required last entry */
+       {0, }
+};
+MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
+
+#define I40E_MAX_VF_COUNT 128
+#define OPTION_UNSET    -1
+#define I40E_PARAM_INIT { [0 ... I40E_MAX_NIC] = OPTION_UNSET}
+#define I40E_MAX_NIC 64
+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+#ifdef CONFIG_PCI_IOV
+static int max_vfs[I40E_MAX_NIC+1] = I40E_PARAM_INIT;
+module_param_array_named(max_vfs, max_vfs, int, NULL, 0);
+MODULE_PARM_DESC(max_vfs,
+       "Number of Virtual Functions: 0 = disable (default), 1-"
+       XSTRINGIFY(I40E_MAX_VF_COUNT) " = enable "
+       "this many VFs");
+#endif /* CONFIG_PCI_IOV */
+#endif /* HAVE_SRIOV_CONFIGURE */
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static struct workqueue_struct *i40e_wq;
+
+/**
+ * i40e_get_lump - find a lump of free generic resource
+ * @pf: board private structure
+ * @pile: the pile of resource to search
+ * @needed: the number of items needed
+ * @id: an owner id to stick on the items assigned
+ *
+ * Returns the base item index of the lump, or negative for error
+ *
+ * The search_hint trick and lack of advanced fit-finding only work
+ * because we're highly likely to have all the same size lump requests.
+ * Linear search time and any fragmentation should be minimal.
+ **/
+static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+                        u16 needed, u16 id)
+{
+       int ret = -ENOMEM;
+       int i, j;
+
+       if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
+               dev_info(&pf->pdev->dev,
+                        "param err: pile=%p needed=%d id=0x%04x\n",
+                        pile, needed, id);
+               return -EINVAL;
+       }
+
+       /* start the linear search with an imperfect hint */
+       i = pile->search_hint;
+       while (i < pile->num_entries) {
+               /* skip already allocated entries */
+               if (pile->list[i] & I40E_PILE_VALID_BIT) {
+                       i++;
+                       continue;
+               }
+
+               /* do we have enough in this lump? */
+               for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
+                       if (pile->list[i+j] & I40E_PILE_VALID_BIT)
+                               break;
+               }
+
+               if (j == needed) {
+                       /* there was enough, so assign it to the requestor */
+                       for (j = 0; j < needed; j++)
+                               pile->list[i+j] = id | I40E_PILE_VALID_BIT;
+                       ret = i;
+                       pile->search_hint = i + j;
+                       break;
+               }
+
+               /* not enough, so skip over it and continue looking */
+               i += j;
+       }
+
+       return ret;
+}
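+
+/* Usage sketch (the pile and id names here are illustrative):
+ *
+ *     base = i40e_get_lump(pf, pf->qp_pile, needed, vsi->idx);
+ *     if (base < 0)
+ *             ... fall back or fail ...
+ *     ...
+ *     i40e_put_lump(pf->qp_pile, base, vsi->idx);
+ */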
+
+/**
+ * i40e_put_lump - return a lump of generic resource
+ * @pile: the pile of resource to search
+ * @index: the base item index
+ * @id: the owner id of the items assigned
+ *
+ * Returns the count of items in the lump
+ **/
+static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
+{
+       int valid_id = (id | I40E_PILE_VALID_BIT);
+       int count = 0;
+       int i;
+
+       if (!pile || index >= pile->num_entries)
+               return -EINVAL;
+
+       for (i = index;
+            i < pile->num_entries && pile->list[i] == valid_id;
+            i++) {
+               pile->list[i] = 0;
+               count++;
+       }
+
+       if (count && index < pile->search_hint)
+               pile->search_hint = index;
+
+       return count;
+}
+
+/**
+ * i40e_find_vsi_from_id - search for the VSI with the given id
+ * @pf: the PF structure to search
+ * @id: id of the VSI being searched for
+ *
+ * Returns the matching VSI, or NULL if no VSI has that id.
+ **/
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
+{
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vsi; i++)
+               if (pf->vsi[i] && (pf->vsi[i]->id == id))
+                       return pf->vsi[i];
+
+       return NULL;
+}
+
+/**
+ * i40e_service_event_schedule - Schedule the service task to wake up
+ * @pf: board private structure
+ *
+ * If not already scheduled, this puts the task into the work queue
+ **/
+static void i40e_service_event_schedule(struct i40e_pf *pf)
+{
+       if (!test_bit(__I40E_DOWN, &pf->state) &&
+           !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
+           !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+               queue_work(i40e_wq, &pf->service_task);
+}
+
+/**
+ * i40e_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ *
+ * If any port has noticed a Tx timeout, it is likely that the whole
+ * device is munged, not just the one netdev port, so go for the full
+ * reset.
+ **/
+#ifdef I40E_FCOE
+void i40e_tx_timeout(struct net_device *netdev)
+#else
+static void i40e_tx_timeout(struct net_device *netdev)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_ring *tx_ring = NULL;
+       unsigned int i, hung_queue = 0;
+       u32 head, val;
+
+       pf->tx_timeout_count++;
+
+       /* find the stopped queue the same way the stack does */
+       for (i = 0; i < netdev->num_tx_queues; i++) {
+               struct netdev_queue *q;
+               unsigned long trans_start;
+
+               q = netdev_get_tx_queue(netdev, i);
+               trans_start = q->trans_start ? : netdev->trans_start;
+               if (netif_xmit_stopped(q) && time_after(jiffies,
+                       (trans_start + netdev->watchdog_timeo))) {
+                       hung_queue = i;
+                       break;
+               }
+       }
+
+       if (i == netdev->num_tx_queues) {
+               netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
+       } else {
+               /* now that we have an index, find the tx_ring struct */
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+                               if (hung_queue ==
+                                               vsi->tx_rings[i]->queue_index) {
+                                       tx_ring = vsi->tx_rings[i];
+                                       break;
+                               }
+                       }
+               }
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       if (vsi->block_tx_timeout) {
+               netdev_info(netdev, "tx_timeout recovery disabled\n");
+               return;
+       }
+#endif
+
+       if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
+               pf->tx_timeout_recovery_level = 1;  /* reset after some time */
+       else if (time_before(jiffies,
+                      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
+               return;   /* don't do any new action before the next timeout */
+
+       if (tx_ring) {
+               head = i40e_get_head(tx_ring);
+               /* Read interrupt register */
+               if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+                       val = rd32(&pf->hw,
+                            I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+                                       tx_ring->vsi->base_vector - 1));
+               else
+                       val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+               netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+                               vsi->seid, hung_queue, tx_ring->next_to_clean,
+                               head, tx_ring->next_to_use,
+                               readl(tx_ring->tail), val);
+       }
+
+       pf->tx_timeout_last_recovery = jiffies;
+       netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
+                   pf->tx_timeout_recovery_level, hung_queue);
+
+       switch (pf->tx_timeout_recovery_level) {
+       case 1:
+               set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+               break;
+       case 2:
+               set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+               break;
+       case 3:
+               set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+               break;
+       default:
+               netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+               break;
+       }
+
+       i40e_service_event_schedule(pf);
+       pf->tx_timeout_recovery_level++;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new head index
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+       rx_ring->next_to_use = val;
+
+       /* Force memory writes to complete before letting h/w
+        * know there are new descriptors to fetch.  (Only
+        * applicable for weak-ordered memory model archs,
+        * such as IA-64).
+        */
+       wmb();
+       writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_get_vsi_stats_struct - Get System Network Statistics
+ * @vsi: the VSI we care about
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+#ifdef HAVE_NDO_GET_STATS64
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
+{
+       return &vsi->net_stats;
+}
+#else
+struct net_device_stats *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
+{
+       /* It is possible for a VSI to not have a netdev */
+       if (vsi->netdev)
+               return &vsi->netdev->stats;
+       else
+               return &vsi->net_stats;
+}
+#endif
+
+/**
+ * i40e_get_netdev_stats_struct - Get statistics for netdev interface
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+#ifdef HAVE_NDO_GET_STATS64
+#ifdef I40E_FCOE
+struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+                                            struct net_device *netdev,
+                                            struct rtnl_link_stats64 *stats)
+#else
+static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+                                            struct net_device *netdev,
+                                            struct rtnl_link_stats64 *stats)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_ring *tx_ring, *rx_ring;
+       struct i40e_vsi *vsi = np->vsi;
+       struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+       int i;
+
+       if (test_bit(__I40E_DOWN, &vsi->state))
+               return stats;
+
+       if (!vsi->tx_rings)
+               return stats;
+
+       rcu_read_lock();
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               u64 bytes, packets;
+               unsigned int start;
+
+               tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+               if (!tx_ring)
+                       continue;
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
+                       packets = tx_ring->stats.packets;
+                       bytes   = tx_ring->stats.bytes;
+               } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+
+               stats->tx_packets += packets;
+               stats->tx_bytes   += bytes;
+               /* the rx ring is laid out directly after its tx pair */
+               rx_ring = &tx_ring[1];
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
+                       packets = rx_ring->stats.packets;
+                       bytes   = rx_ring->stats.bytes;
+               } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+
+               stats->rx_packets += packets;
+               stats->rx_bytes   += bytes;
+       }
+       rcu_read_unlock();
+
+       /* following stats updated by i40e_watchdog_subtask() */
+       stats->multicast        = vsi_stats->multicast;
+       stats->tx_errors        = vsi_stats->tx_errors;
+       stats->tx_dropped       = vsi_stats->tx_dropped;
+       stats->rx_errors        = vsi_stats->rx_errors;
+       stats->rx_dropped       = vsi_stats->rx_dropped;
+       stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
+       stats->rx_length_errors = vsi_stats->rx_length_errors;
+
+       return stats;
+}
+#else
+#ifdef I40E_FCOE
+struct net_device_stats *i40e_get_netdev_stats_struct(struct net_device *netdev)
+#else
+static struct net_device_stats *i40e_get_netdev_stats_struct(
+                                                     struct net_device *netdev)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+
+       return i40e_get_vsi_stats_struct(vsi);
+}
+#endif
+
+/**
+ * i40e_vsi_reset_stats - Resets all stats of the given vsi
+ * @vsi: the VSI to have its stats reset
+ **/
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
+{
+#ifdef HAVE_NDO_GET_STATS64
+       struct rtnl_link_stats64 *ns;
+#else
+       struct net_device_stats *ns;
+#endif
+       int i;
+
+       if (!vsi)
+               return;
+
+       ns = i40e_get_vsi_stats_struct(vsi);
+       memset(ns, 0, sizeof(*ns));
+       memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
+       memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
+       memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
+       if (vsi->rx_rings && vsi->rx_rings[0]) {
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       memset(&vsi->rx_rings[i]->stats, 0,
+                              sizeof(vsi->rx_rings[i]->stats));
+                       memset(&vsi->rx_rings[i]->rx_stats, 0,
+                              sizeof(vsi->rx_rings[i]->rx_stats));
+                       memset(&vsi->tx_rings[i]->stats, 0,
+                              sizeof(vsi->tx_rings[i]->stats));
+                       memset(&vsi->tx_rings[i]->tx_stats, 0,
+                              sizeof(vsi->tx_rings[i]->tx_stats));
+               }
+       }
+       vsi->stat_offsets_loaded = false;
+}
+
+/**
+ * i40e_pf_reset_stats - Reset all of the stats for the given PF
+ * @pf: the PF to be reset
+ **/
+void i40e_pf_reset_stats(struct i40e_pf *pf)
+{
+       int i;
+
+       memset(&pf->stats, 0, sizeof(pf->stats));
+       memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
+       pf->stat_offsets_loaded = false;
+
+       for (i = 0; i < I40E_MAX_VEB; i++) {
+               if (pf->veb[i]) {
+                       memset(&pf->veb[i]->stats, 0,
+                              sizeof(pf->veb[i]->stats));
+                       memset(&pf->veb[i]->stats_offsets, 0,
+                              sizeof(pf->veb[i]->stats_offsets));
+                       pf->veb[i]->stat_offsets_loaded = false;
+               }
+       }
+#ifdef I40E_ADD_PROBES
+       pf->tcp_segs = 0;
+       pf->tx_tcp_cso = 0;
+       pf->tx_udp_cso = 0;
+       pf->tx_sctp_cso = 0;
+       pf->tx_ip4_cso = 0;
+       pf->rx_tcp_cso = 0;
+       pf->rx_udp_cso = 0;
+       pf->rx_sctp_cso = 0;
+       pf->rx_ip4_cso = 0;
+       pf->rx_tcp_cso_err = 0;
+       pf->rx_udp_cso_err = 0;
+       pf->rx_sctp_cso_err = 0;
+       pf->rx_ip4_cso_err = 0;
+#endif
+}
+
+/**
+ * i40e_stat_update48 - read and update a 48 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @hireg: the high 32 bit reg to read
+ * @loreg: the low 32 bit reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ *
+ * Since the device stats are not reset at PFReset, they likely will not
+ * be zeroed when the driver starts.  We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.  In the process, we also manage
+ * the potential roll-over.
+ **/
+static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
+                              bool offset_loaded, u64 *offset, u64 *stat)
+{
+       u64 new_data;
+
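+       /* the QEMU-emulated device is assumed not to support 64-bit
+        * register reads, so assemble the value from two 32-bit reads
+        */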
+       if (hw->device_id == I40E_DEV_ID_QEMU) {
+               new_data = rd32(hw, loreg);
+               new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+       } else {
+               new_data = rd64(hw, loreg);
+       }
+       if (!offset_loaded)
+               *offset = new_data;
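+       /* on a 48-bit counter wrap, add 2^48 before subtracting the
+        * offset, then mask the result back down to 48 bits
+        */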
+       if (likely(new_data >= *offset))
+               *stat = new_data - *offset;
+       else
+               *stat = (new_data + BIT_ULL(48)) - *offset;
+       *stat &= 0xFFFFFFFFFFFFULL;
+}
+
+/**
+ * i40e_stat_update32 - read and update a 32 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @reg: the hw reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ **/
+static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
+                              bool offset_loaded, u64 *offset, u64 *stat)
+{
+       u32 new_data;
+
+       new_data = rd32(hw, reg);
+       if (!offset_loaded)
+               *offset = new_data;
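+       /* same wrap handling as the 48-bit variant, at 32-bit width */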
+       if (likely(new_data >= *offset))
+               *stat = (u32)(new_data - *offset);
+       else
+               *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
+}
+
+/**
+ * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
+ * @vsi: the VSI to be updated
+ **/
+void i40e_update_eth_stats(struct i40e_vsi *vsi)
+{
+       int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_eth_stats *oes;
+       struct i40e_eth_stats *es;     /* device's eth stats */
+
+       es = &vsi->eth_stats;
+       oes = &vsi->eth_stats_offsets;
+
+       /* Gather up the stats that the hw collects */
+       i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->tx_errors, &es->tx_errors);
+       i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->rx_discards, &es->rx_discards);
+       i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+
+       i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
+                          I40E_GLV_GORCL(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->rx_bytes, &es->rx_bytes);
+       i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
+                          I40E_GLV_UPRCL(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->rx_unicast, &es->rx_unicast);
+       i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
+                          I40E_GLV_MPRCL(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->rx_multicast, &es->rx_multicast);
+       i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
+                          I40E_GLV_BPRCL(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->rx_broadcast, &es->rx_broadcast);
+
+       i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
+                          I40E_GLV_GOTCL(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->tx_bytes, &es->tx_bytes);
+       i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
+                          I40E_GLV_UPTCL(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->tx_unicast, &es->tx_unicast);
+       i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
+                          I40E_GLV_MPTCL(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->tx_multicast, &es->tx_multicast);
+       i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
+                          I40E_GLV_BPTCL(stat_idx),
+                          vsi->stat_offsets_loaded,
+                          &oes->tx_broadcast, &es->tx_broadcast);
+       vsi->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_veb_stats - Update Switch component statistics
+ * @veb: the VEB being updated
+ **/
+static void i40e_update_veb_stats(struct i40e_veb *veb)
+{
+       struct i40e_pf *pf = veb->pf;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_eth_stats *oes;
+       struct i40e_eth_stats *es;     /* device's eth stats */
+       struct i40e_veb_tc_stats *veb_oes;
+       struct i40e_veb_tc_stats *veb_es;
+       int i, idx = 0;
+
+       idx = veb->stats_idx;
+       es = &veb->stats;
+       oes = &veb->stats_offsets;
+       veb_es = &veb->tc_stats;
+       veb_oes = &veb->tc_stats_offsets;
+
+       /* Gather up the stats that the hw collects */
+       i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
+                          veb->stat_offsets_loaded,
+                          &oes->tx_discards, &es->tx_discards);
+       i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+                          veb->stat_offsets_loaded,
+                          &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+
+       i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
+                          veb->stat_offsets_loaded,
+                          &oes->rx_bytes, &es->rx_bytes);
+       i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
+                          veb->stat_offsets_loaded,
+                          &oes->rx_unicast, &es->rx_unicast);
+       i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
+                          veb->stat_offsets_loaded,
+                          &oes->rx_multicast, &es->rx_multicast);
+       i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
+                          veb->stat_offsets_loaded,
+                          &oes->rx_broadcast, &es->rx_broadcast);
+
+       i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
+                          veb->stat_offsets_loaded,
+                          &oes->tx_bytes, &es->tx_bytes);
+       i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
+                          veb->stat_offsets_loaded,
+                          &oes->tx_unicast, &es->tx_unicast);
+       i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
+                          veb->stat_offsets_loaded,
+                          &oes->tx_multicast, &es->tx_multicast);
+       i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
+                          veb->stat_offsets_loaded,
+                          &oes->tx_broadcast, &es->tx_broadcast);
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
+                                  I40E_GLVEBTC_RPCL(i, idx),
+                                  veb->stat_offsets_loaded,
+                                  &veb_oes->tc_rx_packets[i],
+                                  &veb_es->tc_rx_packets[i]);
+               i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
+                                  I40E_GLVEBTC_RBCL(i, idx),
+                                  veb->stat_offsets_loaded,
+                                  &veb_oes->tc_rx_bytes[i],
+                                  &veb_es->tc_rx_bytes[i]);
+               i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
+                                  I40E_GLVEBTC_TPCL(i, idx),
+                                  veb->stat_offsets_loaded,
+                                  &veb_oes->tc_tx_packets[i],
+                                  &veb_es->tc_tx_packets[i]);
+               i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
+                                  I40E_GLVEBTC_TBCL(i, idx),
+                                  veb->stat_offsets_loaded,
+                                  &veb_oes->tc_tx_bytes[i],
+                                  &veb_es->tc_tx_bytes[i]);
+       }
+       veb->stat_offsets_loaded = true;
+}
+
+#ifdef I40E_FCOE
+/**
+ * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
+ * @vsi: the VSI that is capable of doing FCoE
+ **/
+static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_fcoe_stats *ofs;
+       struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
+       int idx;
+
+       if (vsi->type != I40E_VSI_FCOE)
+               return;
+
+       idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
+       fs = &vsi->fcoe_stats;
+       ofs = &vsi->fcoe_stats_offsets;
+
+       i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
+       i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
+       i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
+       i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
+       i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
+       i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
+       i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->fcoe_last_error, &fs->fcoe_last_error);
+       i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
+
+       vsi->fcoe_stat_offsets_loaded = true;
+}
+
+#endif
+/**
+ * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
+ * @pf: the corresponding PF
+ *
+ * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
+ **/
+static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
+{
+       struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+       struct i40e_hw_port_stats *nsd = &pf->stats;
+       struct i40e_hw *hw = &pf->hw;
+       u64 xoff = 0;
+       u16 i, v;
+
+       if ((hw->fc.current_mode != I40E_FC_FULL) &&
+           (hw->fc.current_mode != I40E_FC_RX_PAUSE))
+               return;
+
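+       /* snapshot the previous XOFF count so we can tell below whether
+        * any new PAUSE frames arrived since the last update
+        */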
+       xoff = nsd->link_xoff_rx;
+       i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->link_xoff_rx, &nsd->link_xoff_rx);
+
+       /* No new LFC xoff rx */
+       if (!(nsd->link_xoff_rx - xoff))
+               return;
+
+       /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+               struct i40e_vsi *vsi = pf->vsi[v];
+
+               if (!vsi || !vsi->tx_rings[0])
+                       continue;
+
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       struct i40e_ring *ring = vsi->tx_rings[i];
+
+                       clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
+               }
+       }
+}
+
+/**
+ * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
+ * @pf: the corresponding PF
+ *
+ * Update the Rx XOFF counter (PAUSE frames) in PFC mode
+ **/
+static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
+{
+       struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+       struct i40e_hw_port_stats *nsd = &pf->stats;
+       bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
+       struct i40e_dcbx_config *dcb_cfg;
+       struct i40e_hw *hw = &pf->hw;
+       u16 i, v;
+       u8 tc;
+
+       dcb_cfg = &hw->local_dcbx_config;
+
+       /* Collect Link XOFF stats when PFC is disabled */
+       if (!dcb_cfg->pfc.pfcenable) {
+               i40e_update_link_xoff_rx(pf);
+               return;
+       }
+
+       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+               u64 prio_xoff = nsd->priority_xoff_rx[i];
+
+               i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+                                  pf->stat_offsets_loaded,
+                                  &osd->priority_xoff_rx[i],
+                                  &nsd->priority_xoff_rx[i]);
+
+               /* No new PFC xoff rx */
+               if (!(nsd->priority_xoff_rx[i] - prio_xoff))
+                       continue;
+               /* Get the TC for given priority */
+               tc = dcb_cfg->etscfg.prioritytable[i];
+               xoff[tc] = true;
+       }
+
+       /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+               struct i40e_vsi *vsi = pf->vsi[v];
+
+               if (!vsi || !vsi->tx_rings[0])
+                       continue;
+
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       struct i40e_ring *ring = vsi->tx_rings[i];
+
+                       tc = ring->dcb_tc;
+                       if (xoff[tc])
+                               clear_bit(__I40E_HANG_CHECK_ARMED,
+                                         &ring->state);
+               }
+       }
+}
+
+/**
+ * i40e_update_vsi_stats - Update the vsi statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * There are a few instances where we store the same stat in a
+ * couple of different structs.  This is partly because we have
+ * the netdev stats that need to be filled out, which is slightly
+ * different from the "eth_stats" defined by the chip and used in
+ * VF communications.  We sort it out here.
+ **/
+static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+#ifdef HAVE_NDO_GET_STATS64
+       struct rtnl_link_stats64 *ons;
+       struct rtnl_link_stats64 *ns;   /* netdev stats */
+#else
+       struct net_device_stats *ons;
+       struct net_device_stats *ns;   /* netdev stats */
+#endif
+       struct i40e_eth_stats *oes;
+       struct i40e_eth_stats *es;     /* device's eth stats */
+       u32 tx_restart, tx_busy;
+       struct i40e_ring *p;
+       u32 rx_page, rx_buf;
+       u64 bytes, packets;
+#ifdef HAVE_NDO_GET_STATS64
+       unsigned int start;
+#endif
+       u64 tx_linearize;
+       u64 rx_p, rx_b;
+       u64 tx_p, tx_b;
+       u16 q;
+
+       if (test_bit(__I40E_DOWN, &vsi->state) ||
+           test_bit(__I40E_CONFIG_BUSY, &pf->state))
+               return;
+
+       ns = i40e_get_vsi_stats_struct(vsi);
+       ons = &vsi->net_stats_offsets;
+       es = &vsi->eth_stats;
+       oes = &vsi->eth_stats_offsets;
+
+       /* Gather up the netdev and vsi stats that the driver collects
+        * on the fly during packet processing
+        */
+       rx_b = rx_p = 0;
+       tx_b = tx_p = 0;
+       tx_restart = tx_busy = tx_linearize = 0;
+       rx_page = 0;
+       rx_buf = 0;
+       rcu_read_lock();
+       for (q = 0; q < vsi->num_queue_pairs; q++) {
+               /* locate Tx ring */
+               p = ACCESS_ONCE(vsi->tx_rings[q]);
+
+#ifdef HAVE_NDO_GET_STATS64
+               do {
+                       start = u64_stats_fetch_begin_irq(&p->syncp);
+#endif
+                       packets = p->stats.packets;
+                       bytes = p->stats.bytes;
+#ifdef HAVE_NDO_GET_STATS64
+               } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+#endif
+               tx_b += bytes;
+               tx_p += packets;
+               tx_restart += p->tx_stats.restart_queue;
+               tx_busy += p->tx_stats.tx_busy;
+               tx_linearize += p->tx_stats.tx_linearize;
+
+               /* Rx queue is part of the same block as Tx queue */
+               p = &p[1];
+#ifdef HAVE_NDO_GET_STATS64
+               do {
+                       start = u64_stats_fetch_begin_irq(&p->syncp);
+#endif
+                       packets = p->stats.packets;
+                       bytes = p->stats.bytes;
+#ifdef HAVE_NDO_GET_STATS64
+               } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+#endif
+               rx_b += bytes;
+               rx_p += packets;
+               rx_buf += p->rx_stats.alloc_buff_failed;
+               rx_page += p->rx_stats.alloc_page_failed;
+       }
+       rcu_read_unlock();
+       vsi->tx_restart = tx_restart;
+       vsi->tx_busy = tx_busy;
+       vsi->tx_linearize = tx_linearize;
+       vsi->rx_page_failed = rx_page;
+       vsi->rx_buf_failed = rx_buf;
+
+       ns->rx_packets = rx_p;
+       ns->rx_bytes = rx_b;
+       ns->tx_packets = tx_p;
+       ns->tx_bytes = tx_b;
+
+       /* update netdev stats from eth stats */
+       i40e_update_eth_stats(vsi);
+       ons->tx_errors = oes->tx_errors;
+       ns->tx_errors = es->tx_errors;
+       ons->multicast = oes->rx_multicast;
+       ns->multicast = es->rx_multicast;
+       ons->rx_dropped = oes->rx_discards;
+       ns->rx_dropped = es->rx_discards;
+       ons->tx_dropped = oes->tx_discards;
+       ns->tx_dropped = es->tx_discards;
+
+       /* pull in a couple PF stats if this is the main vsi */
+       if (vsi == pf->vsi[pf->lan_vsi]) {
+               ns->rx_crc_errors = pf->stats.crc_errors;
+               ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
+               ns->rx_length_errors = pf->stats.rx_length_errors;
+       }
+}
+
+/**
+ * i40e_update_pf_stats - Update the PF statistics counters.
+ * @pf: the PF to be updated
+ **/
+static void i40e_update_pf_stats(struct i40e_pf *pf)
+{
+       struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+       struct i40e_hw_port_stats *nsd = &pf->stats;
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+       int i;
+
+       i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+                          I40E_GLPRT_GORCL(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+       i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+                          I40E_GLPRT_GOTCL(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+       i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->eth.rx_discards,
+                          &nsd->eth.rx_discards);
+       i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+                          I40E_GLPRT_UPRCL(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->eth.rx_unicast,
+                          &nsd->eth.rx_unicast);
+       i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+                          I40E_GLPRT_MPRCL(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->eth.rx_multicast,
+                          &nsd->eth.rx_multicast);
+       i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+                          I40E_GLPRT_BPRCL(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->eth.rx_broadcast,
+                          &nsd->eth.rx_broadcast);
+       i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+                          I40E_GLPRT_UPTCL(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->eth.tx_unicast,
+                          &nsd->eth.tx_unicast);
+       i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+                          I40E_GLPRT_MPTCL(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->eth.tx_multicast,
+                          &nsd->eth.tx_multicast);
+       i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+                          I40E_GLPRT_BPTCL(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->eth.tx_broadcast,
+                          &nsd->eth.tx_broadcast);
+
+       i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->tx_dropped_link_down,
+                          &nsd->tx_dropped_link_down);
+
+       i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->crc_errors, &nsd->crc_errors);
+
+       i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->illegal_bytes, &nsd->illegal_bytes);
+
+       i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->mac_local_faults,
+                          &nsd->mac_local_faults);
+       i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->mac_remote_faults,
+                          &nsd->mac_remote_faults);
+
+       i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_length_errors,
+                          &nsd->rx_length_errors);
+
+       i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->link_xon_rx, &nsd->link_xon_rx);
+       i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->link_xon_tx, &nsd->link_xon_tx);
+       i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
+       i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->link_xoff_tx, &nsd->link_xoff_tx);
+
+       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+               i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+                                  pf->stat_offsets_loaded,
+                                  &osd->priority_xon_rx[i],
+                                  &nsd->priority_xon_rx[i]);
+               i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+                                  pf->stat_offsets_loaded,
+                                  &osd->priority_xon_tx[i],
+                                  &nsd->priority_xon_tx[i]);
+               i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+                                  pf->stat_offsets_loaded,
+                                  &osd->priority_xoff_tx[i],
+                                  &nsd->priority_xoff_tx[i]);
+               i40e_stat_update32(hw,
+                                  I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+                                  pf->stat_offsets_loaded,
+                                  &osd->priority_xon_2_xoff[i],
+                                  &nsd->priority_xon_2_xoff[i]);
+       }
+
+       i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+                          I40E_GLPRT_PRC64L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_size_64, &nsd->rx_size_64);
+       i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+                          I40E_GLPRT_PRC127L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_size_127, &nsd->rx_size_127);
+       i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+                          I40E_GLPRT_PRC255L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_size_255, &nsd->rx_size_255);
+       i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+                          I40E_GLPRT_PRC511L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_size_511, &nsd->rx_size_511);
+       i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+                          I40E_GLPRT_PRC1023L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_size_1023, &nsd->rx_size_1023);
+       i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+                          I40E_GLPRT_PRC1522L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_size_1522, &nsd->rx_size_1522);
+       i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+                          I40E_GLPRT_PRC9522L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_size_big, &nsd->rx_size_big);
+
+       i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+                          I40E_GLPRT_PTC64L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->tx_size_64, &nsd->tx_size_64);
+       i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+                          I40E_GLPRT_PTC127L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->tx_size_127, &nsd->tx_size_127);
+       i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+                          I40E_GLPRT_PTC255L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->tx_size_255, &nsd->tx_size_255);
+       i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+                          I40E_GLPRT_PTC511L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->tx_size_511, &nsd->tx_size_511);
+       i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+                          I40E_GLPRT_PTC1023L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->tx_size_1023, &nsd->tx_size_1023);
+       i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+                          I40E_GLPRT_PTC1522L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->tx_size_1522, &nsd->tx_size_1522);
+       i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+                          I40E_GLPRT_PTC9522L(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->tx_size_big, &nsd->tx_size_big);
+
+       i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_undersize, &nsd->rx_undersize);
+       i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_fragments, &nsd->rx_fragments);
+       i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_oversize, &nsd->rx_oversize);
+       i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+                          pf->stat_offsets_loaded,
+                          &osd->rx_jabber, &nsd->rx_jabber);
+
+       /* FDIR stats */
+       i40e_stat_update32(hw,
+                          I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
+                          pf->stat_offsets_loaded,
+                          &osd->fd_atr_match, &nsd->fd_atr_match);
+       i40e_stat_update32(hw,
+                          I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
+                          pf->stat_offsets_loaded,
+                          &osd->fd_sb_match, &nsd->fd_sb_match);
+       i40e_stat_update32(hw,
+                     I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+                     pf->stat_offsets_loaded,
+                     &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
+
+       val = rd32(hw, I40E_PRTPM_EEE_STAT);
+       nsd->tx_lpi_status =
+                      (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+                       I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+       nsd->rx_lpi_status =
+                      (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+                       I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+       i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+                          pf->stat_offsets_loaded,
+                          &osd->tx_lpi_count, &nsd->tx_lpi_count);
+       i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
+                          pf->stat_offsets_loaded,
+                          &osd->rx_lpi_count, &nsd->rx_lpi_count);
+
+       if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+               nsd->fd_sb_status = false;
+       else if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
+               nsd->fd_sb_status = true;
+       else
+               nsd->fd_sb_status = false;
+
+       if (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)
+               nsd->fd_atr_status = false;
+       else if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+               nsd->fd_atr_status = true;
+       else
+               nsd->fd_atr_status = false;
+
+       pf->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_stats - Update the various statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * Update the various stats for this VSI and its related entities.
+ **/
+void i40e_update_stats(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+
+       if (vsi == pf->vsi[pf->lan_vsi])
+               i40e_update_pf_stats(pf);
+
+       i40e_update_vsi_stats(vsi);
+#ifdef I40E_FCOE
+       i40e_update_fcoe_stats(vsi);
+#endif
+}
+
+/**
+ * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a VF filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
+                                        u8 *macaddr, s16 vlan,
+                                        bool is_vf, bool is_netdev)
+{
+       struct i40e_mac_filter *f;
+
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
+       if (!vsi || !macaddr)
+               return NULL;
+
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               if ((ether_addr_equal(macaddr, f->macaddr)) &&
+                   (vlan == f->vlan)    &&
+                   (!is_vf || f->is_vf) &&
+                   (!is_netdev || f->is_netdev))
+                       return f;
+       }
+       return NULL;
+}
+
+/**
+ * i40e_find_mac - Find a mac addr in the macvlan filters list
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address we are searching for
+ * @is_vf: make sure it's a VF filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns the first filter with the provided MAC address or NULL if
+ * MAC address was not found
+ **/
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+                                     bool is_vf, bool is_netdev)
+{
+       struct i40e_mac_filter *f;
+
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
+       if (!vsi || !macaddr)
+               return NULL;
+
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               if ((ether_addr_equal(macaddr, f->macaddr)) &&
+                   (!is_vf || f->is_vf) &&
+                   (!is_netdev || f->is_netdev))
+                       return f;
+       }
+       return NULL;
+}
+
+/**
+ * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
+ * @vsi: the VSI to be searched
+ *
+ * Returns true if VSI is in vlan mode or false otherwise
+ **/
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
+{
+       struct i40e_mac_filter *f;
+
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
+       /* Only when every filter has vlan == -1 is the VSI out of vlan
+        * mode, so we have to walk the whole list to make sure
+        */
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               if (f->vlan >= 0 || vsi->info.pvid)
+                       return true;
+       }
+
+       return false;
+}
+
+/**
+ * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be filtered
+ * @is_vf: true if it is a VF
+ * @is_netdev: true if it is a netdev
+ *
+ * Goes through all the macvlan filters and adds a
+ * macvlan filter for each unique vlan that already exists
+ *
+ * Returns first filter found on success, else NULL
+ **/
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+                                            bool is_vf, bool is_netdev)
+{
+       struct i40e_mac_filter *f;
+
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
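+       /* if a port VLAN is set, move every filter onto that VLAN before
+        * checking whether a matching macvlan filter already exists
+        */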
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               if (vsi->info.pvid)
+                       f->vlan = le16_to_cpu(vsi->info.pvid);
+               if (!i40e_find_filter(vsi, macaddr, f->vlan,
+                                     is_vf, is_netdev)) {
+                       if (!i40e_add_filter(vsi, macaddr, f->vlan,
+                                            is_vf, is_netdev))
+                               return NULL;
+               }
+       }
+
+       return list_first_entry_or_null(&vsi->mac_filter_list,
+                                       struct i40e_mac_filter, list);
+}
+
+/**
+ * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
+ * @vsi: the PF Main VSI - inappropriate for any other VSI
+ * @macaddr: the MAC address
+ *
+ * Some older firmware configurations set up a default promiscuous vlan
+ * filter that needs to be removed.
+ **/
+static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+{
+       struct i40e_aqc_remove_macvlan_element_data element;
+       struct i40e_pf *pf = vsi->back;
+       i40e_status ret;
+
+       /* Only appropriate for the PF main VSI */
+       if (vsi->type != I40E_VSI_MAIN)
+               return -EINVAL;
+
+       memset(&element, 0, sizeof(element));
+       ether_addr_copy(element.mac_addr, macaddr);
+       element.vlan_tag = 0;
+       element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+                       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+       if (ret)
+               return -ENOENT;
+
+       return 0;
+}
+
+/**
+ * i40e_add_filter - Add a mac/vlan filter to the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a VF filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * held; the WARN below flags callers that fail to do so.
+ **/
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+                                       u8 *macaddr, s16 vlan,
+                                       bool is_vf, bool is_netdev)
+{
+       struct i40e_mac_filter *f;
+
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
+       if (!vsi || !macaddr)
+               return NULL;
+
+       f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+       if (!f) {
+               f = kzalloc(sizeof(*f), GFP_ATOMIC);
+               if (!f)
+                       goto add_filter_out;
+
+               ether_addr_copy(f->macaddr, macaddr);
+               f->vlan = vlan;
+               f->changed = true;
+
+               INIT_LIST_HEAD(&f->list);
+               list_add(&f->list, &vsi->mac_filter_list);
+       }
+
+       /* increment counter and add a new flag if needed */
+       if (is_vf) {
+               if (!f->is_vf) {
+                       f->is_vf = true;
+                       f->counter++;
+               }
+       } else if (is_netdev) {
+               if (!f->is_netdev) {
+                       f->is_netdev = true;
+                       f->counter++;
+               }
+       } else {
+               f->counter++;
+       }
+
+       /* changed tells sync_filters_subtask to
+        * push the filter down to the firmware
+        */
+       if (f->changed) {
+               vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+               vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+       }
+
+add_filter_out:
+       return f;
+}
+
+/**
+ * i40e_del_filter - Remove a mac/vlan filter from the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a VF filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * held; the WARN below flags callers that fail to do so.
+ **/
+void i40e_del_filter(struct i40e_vsi *vsi,
+                    u8 *macaddr, s16 vlan,
+                    bool is_vf, bool is_netdev)
+{
+       struct i40e_mac_filter *f;
+
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+               "Missing mac_filter_list_lock\n");
+
+       if (!vsi || !macaddr)
+               return;
+
+       f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+       if (!f || f->counter == 0)
+               return;
+
+       if (is_vf) {
+               if (f->is_vf) {
+                       f->is_vf = false;
+                       f->counter--;
+               }
+       } else if (is_netdev) {
+               if (f->is_netdev) {
+                       f->is_netdev = false;
+                       f->counter--;
+               }
+       } else {
+               /* make sure we don't remove a filter in use by VF or netdev */
+               int min_f = 0;
+
+               min_f += (f->is_vf ? 1 : 0);
+               min_f += (f->is_netdev ? 1 : 0);
+
+               if (f->counter > min_f)
+                       f->counter--;
+       }
+
+       /* counter == 0 tells sync_filters_subtask to
+        * remove the filter from the firmware's list
+        */
+       if (f->counter == 0) {
+               f->changed = true;
+               vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+               vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+       }
+}
+
+/**
+ * i40e_set_mac - NDO callback to set mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+#ifdef I40E_FCOE
+int i40e_set_mac(struct net_device *netdev, void *p)
+#else
+static int i40e_set_mac(struct net_device *netdev, void *p)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct sockaddr *addr = p;
+       struct i40e_mac_filter *f;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
+               netdev_info(netdev, "already using mac address %pM\n",
+                           addr->sa_data);
+               return 0;
+       }
+
+       if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+           test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+               return -EADDRNOTAVAIL;
+
+       if (ether_addr_equal(hw->mac.addr, addr->sa_data))
+               netdev_info(netdev, "returning to hw mac address %pM\n",
+                           hw->mac.addr);
+       else
+               netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+
+       if (vsi->type == I40E_VSI_MAIN) {
+               i40e_status ret;
+
+               ret = i40e_aq_mac_address_write(&vsi->back->hw,
+                                               I40E_AQC_WRITE_TYPE_LAA_WOL,
+                                               addr->sa_data, NULL);
+               if (ret) {
+                       netdev_info(netdev,
+                                   "Addr change for Main VSI failed: %d\n",
+                                   ret);
+                       return -EADDRNOTAVAIL;
+               }
+       }
+
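+       /* remove the old address: if it is the hardware default MAC,
+        * delete the filter directly via the admin queue; otherwise
+        * drop it from the driver's filter list
+        */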
+       if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
+               struct i40e_aqc_remove_macvlan_element_data element;
+
+               memset(&element, 0, sizeof(element));
+               ether_addr_copy(element.mac_addr, netdev->dev_addr);
+               element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+               i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+       } else {
+               spin_lock_bh(&vsi->mac_filter_list_lock);
+               i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
+                               false, false);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+       }
+
+       if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
+               struct i40e_aqc_add_macvlan_element_data element;
+
+               memset(&element, 0, sizeof(element));
+               ether_addr_copy(element.mac_addr, hw->mac.addr);
+               element.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+               i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+       } else {
+               spin_lock_bh(&vsi->mac_filter_list_lock);
+               f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
+                                   false, false);
+               if (f)
+                       f->is_laa = true;
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+       }
+
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
+       return i40e_sync_vsi_filters(vsi, false);
+}
+
+/**
+ * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
+ * @vsi: the VSI being setup
+ * @ctxt: VSI context structure
+ * @enabled_tc: Enabled TCs bitmap
+ * @is_add: True if called before Add VSI
+ *
+ * Setup VSI queue mapping for enabled traffic classes.
+ **/
+#ifdef I40E_FCOE
+void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+                             struct i40e_vsi_context *ctxt,
+                             u8 enabled_tc,
+                             bool is_add)
+#else
+static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+                                    struct i40e_vsi_context *ctxt,
+                                    u8 enabled_tc,
+                                    bool is_add)
+#endif
+{
+       struct i40e_pf *pf = vsi->back;
+       u16 sections = 0;
+       u8 netdev_tc = 0;
+       u16 numtc = 0;
+       u16 qcount;
+       u8 offset;
+       u16 qmap;
+       int i;
+       u16 num_tc_qps = 0;
+
+       sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+       offset = 0;
+
+       if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+               /* Find numtc from enabled TC bitmap */
+               for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+                       if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
+                               numtc++;
+               }
+               if (!numtc) {
+                       dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
+                       numtc = 1;
+               }
+       } else {
+               /* At least TC0 is enabled in the non-DCB case */
+               numtc = 1;
+       }
+
+       vsi->tc_config.numtc = numtc;
+       vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+       /* Number of queues per enabled TC */
+       /* In the MFP case we can have far fewer MSI-X vectors
+        * available, so we need to lower the queue count used.
+        */
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+               qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+       else
+               qcount = vsi->alloc_queue_pairs;
+
+       num_tc_qps = qcount / numtc;
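+       /* clamp to the per-TC maximum reported by i40e_pf_get_max_q_per_tc() */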
+       num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
+
+       /* Setup queue offset/count for all TCs for given VSI */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               /* See if the given TC is enabled for the given VSI */
+               if (vsi->tc_config.enabled_tc & BIT_ULL(i)) { /* TC is enabled */
+                       int pow, num_qps;
+
+                       switch (vsi->type) {
+                       case I40E_VSI_MAIN:
+                               qcount = min_t(int, pf->rss_size, num_tc_qps);
+                               break;
+#ifdef I40E_FCOE
+                       case I40E_VSI_FCOE:
+                               qcount = num_tc_qps;
+                               break;
+#endif
+                       case I40E_VSI_FDIR:
+                       case I40E_VSI_SRIOV:
+                       case I40E_VSI_VMDQ2:
+                       default:
+                               qcount = num_tc_qps;
+                               WARN_ON(i != 0);
+                               break;
+                       }
+                       vsi->tc_config.tc_info[i].qoffset = offset;
+                       vsi->tc_config.tc_info[i].qcount = qcount;
+
+                       /* find the next higher power-of-2 of num queue pairs */
+                       num_qps = qcount;
+                       pow = 0;
+                       while (num_qps && (BIT_ULL(pow) < qcount)) {
+                               pow++;
+                               num_qps >>= 1;
+                       }
+
+                       vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
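+                       /* qmap packs this TC's first queue index and the
+                        * power-of-2 queue count into one 16-bit word
+                        */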
+                       qmap =
+                           (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+                           (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+                       offset += qcount;
+               } else {
+                       /* TC is not enabled, so set the offset to the
+                        * default queue and allocate one queue for the
+                        * given TC.
+                        */
+                       vsi->tc_config.tc_info[i].qoffset = 0;
+                       vsi->tc_config.tc_info[i].qcount = 1;
+                       vsi->tc_config.tc_info[i].netdev_tc = 0;
+
+                       qmap = 0;
+               }
+               ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+       }
+
+       /* Set actual Tx/Rx queue pairs */
+       vsi->num_queue_pairs = offset;
+       if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
+               /* This code adds more queues to the VSI if we have more
+                * cores than RSS can support; the extra queues will be
+                * served by ATR or other filters.
+                */
+               if (vsi->req_queue_pairs > 0)
+                       vsi->num_queue_pairs = vsi->req_queue_pairs;
+               else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+                       vsi->num_queue_pairs = pf->num_lan_msix;
+       }
+
+       /* Scheduler section valid can only be set for ADD VSI */
+       if (is_add) {
+               sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+
+               ctxt->info.up_enable_bits = enabled_tc;
+       }
+       if (vsi->type == I40E_VSI_SRIOV) {
+               ctxt->info.mapping_flags |=
+                                    cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+               for (i = 0; i < vsi->num_queue_pairs; i++)
+                       ctxt->info.queue_mapping[i] =
+                                              cpu_to_le16(vsi->base_queue + i);
+       } else {
+               ctxt->info.mapping_flags |=
+                                       cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+               ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+       }
+       ctxt->info.valid_sections |= cpu_to_le16(sections);
+}
+
+/**
+ * i40e_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+#ifdef I40E_FCOE
+void i40e_set_rx_mode(struct net_device *netdev)
+#else
+static void i40e_set_rx_mode(struct net_device *netdev)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_mac_filter *f, *ftmp;
+       struct i40e_vsi *vsi = np->vsi;
+       struct netdev_hw_addr *uca;
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+       struct netdev_hw_addr *mca;
+#else
+       struct dev_mc_list *mca;
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+       struct netdev_hw_addr *ha;
+
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
+       /* add addr if not already in the filter list */
+       netdev_for_each_uc_addr(uca, netdev) {
+               if (!i40e_find_mac(vsi, uca->addr, false, true)) {
+                       if (i40e_is_vsi_in_vlan(vsi))
+                               i40e_put_mac_in_vlan(vsi, uca->addr,
+                                                    false, true);
+                       else
+                               i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
+                                               false, true);
+               }
+       }
+
+       netdev_for_each_mc_addr(mca, netdev) {
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+               if (!i40e_find_mac(vsi, mca->addr, false, true)) {
+                       if (i40e_is_vsi_in_vlan(vsi))
+                               i40e_put_mac_in_vlan(vsi, mca->addr,
+                                                    false, true);
+                       else
+                               i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
+                                               false, true);
+               }
+#else
+               if (!i40e_find_mac(vsi, mca->dmi_addr, false, true)) {
+                       if (i40e_is_vsi_in_vlan(vsi))
+                               i40e_put_mac_in_vlan(vsi, mca->dmi_addr,
+                                                    false, true);
+                       else
+                               i40e_add_filter(vsi, mca->dmi_addr,
+                                               I40E_VLAN_ANY, false, true);
+               }
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+       }
+
+       /* remove filter if not in netdev list */
+       list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+
+               if (!f->is_netdev)
+                       continue;
+
+               netdev_for_each_mc_addr(mca, netdev)
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+                       if (ether_addr_equal(mca->addr, f->macaddr))
+#else
+                       if (ether_addr_equal(mca->dmi_addr, f->macaddr))
+#endif
+                               goto bottom_of_search_loop;
+
+               netdev_for_each_uc_addr(uca, netdev)
+                       if (ether_addr_equal(uca->addr, f->macaddr))
+                               goto bottom_of_search_loop;
+
+               for_each_dev_addr(netdev, ha)
+                       if (ether_addr_equal(ha->addr, f->macaddr))
+                               goto bottom_of_search_loop;
+
+               /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+               i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+
+bottom_of_search_loop:
+               continue;
+       }
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       /* check for other flag changes */
+       if (vsi->current_netdev_flags != vsi->netdev->flags) {
+               vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+               vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+       }
+}
+
+/**
+ * i40e_mac_filter_entry_clone - Clones a MAC filter entry
+ * @src: source MAC filter entry to be cloned
+ *
+ * Returns the pointer to newly cloned MAC filter entry or NULL
+ * in case of error
+ **/
+static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
+                                       struct i40e_mac_filter *src)
+{
+       struct i40e_mac_filter *f;
+
+       f = kzalloc(sizeof(*f), GFP_ATOMIC);
+       if (!f)
+               return NULL;
+       *f = *src;
+
+       INIT_LIST_HEAD(&f->list);
+
+       return f;
+}
+
+/**
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: the VSI whose filter list is being restored
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ *        those entries need to be undone.
+ *
+ * MAC filter entries from this list were slated to be removed from the device.
+ **/
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+                               struct list_head *from)
+{
+       struct i40e_mac_filter *f, *ftmp;
+
+       list_for_each_entry_safe(f, ftmp, from, list) {
+               f->changed = true;
+               /* Move the element back into MAC filter list*/
+               list_move_tail(&f->list, &vsi->mac_filter_list);
+       }
+}
+
+/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: the VSI whose filter entries are being restored
+ *
+ * MAC filter entries on the VSI list were slated to be added to the device.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
+{
+       struct i40e_mac_filter *f, *ftmp;
+
+       list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+               if (!f->changed && f->counter != 0)
+                       f->changed = true;
+       }
+}
+
+/**
+ * i40e_cleanup_add_list - Deletes the elements from the add list and
+ *                     releases their memory
+ * @add_list: Pointer to the list which contains MAC filter entries
+ **/
+static void i40e_cleanup_add_list(struct list_head *add_list)
+{
+       struct i40e_mac_filter *f, *ftmp;
+
+       list_for_each_entry_safe(f, ftmp, add_list, list) {
+               list_del(&f->list);
+               kfree(f);
+       }
+}
+
+/**
+ * i40e_sync_vsi_filters - Update the VSI filter list to the HW
+ * @vsi: ptr to the VSI
+ * @grab_rtnl: whether RTNL needs to be grabbed
+ *
+ * Push any outstanding VSI filter changes through the AdminQ.
+ *
+ * Returns 0 or error value
+ **/
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
+{
+       struct list_head tmp_del_list, tmp_add_list;
+       struct i40e_mac_filter *f, *ftmp, *fclone;
+       bool promisc_forced_on = false;
+       bool add_happened = false;
+       int filter_list_len = 0;
+       u32 changed_flags = 0;
+       bool err_cond = false;
+       i40e_status ret = 0;
+       struct i40e_pf *pf;
+       int num_add = 0;
+       int num_del = 0;
+       int aq_err = 0;
+       u16 cmd_flags;
+
+       /* pointers to AQ element arrays, allocated with kcalloc below */
+       struct i40e_aqc_add_macvlan_element_data *add_list;
+       struct i40e_aqc_remove_macvlan_element_data *del_list;
+
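+       /* Only one filter sync per VSI may run at a time; sleep-poll
+        * until the CONFIG_BUSY bit is ours.
+        */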
+       while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
+               usleep_range(1000, 2000);
+       pf = vsi->back;
+
+       if (vsi->netdev) {
+               changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
+               vsi->current_netdev_flags = vsi->netdev->flags;
+       }
+
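+       /* Filter changes are staged on temporary lists under the spinlock;
+        * the AdminQ commands that push them to the hardware are issued
+        * after the lock is dropped.
+        */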
+       INIT_LIST_HEAD(&tmp_del_list);
+       INIT_LIST_HEAD(&tmp_add_list);
+
+       if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
+               vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
+
+               spin_lock_bh(&vsi->mac_filter_list_lock);
+               list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+                       if (!f->changed)
+                               continue;
+
+                       if (f->counter != 0)
+                               continue;
+                       f->changed = false;
+
+                       /* Move the element into temporary del_list */
+                       list_move_tail(&f->list, &tmp_del_list);
+               }
+
+               list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+                       if (!f->changed)
+                               continue;
+
+                       if (f->counter == 0)
+                               continue;
+                       f->changed = false;
+
+                       /* Clone MAC filter entry and add into temporary list */
+                       fclone = i40e_mac_filter_entry_clone(f);
+                       if (!fclone) {
+                               err_cond = true;
+                               break;
+                       }
+                       list_add_tail(&fclone->list, &tmp_add_list);
+               }
+
+               /* if failed to clone MAC filter entry - undo */
+               if (err_cond) {
+                       i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+                       i40e_undo_add_filter_entries(vsi);
+               }
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+               if (err_cond)
+                       i40e_cleanup_add_list(&tmp_add_list);
+       }
+
+       /* Now process 'del_list' outside the lock */
+       if (!list_empty(&tmp_del_list)) {
+
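+               /* each AQ command buffer holds asq_buf_size bytes, so this
+                * is how many delete elements fit in a single
+                * i40e_aq_remove_macvlan() call
+                */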
+               filter_list_len = pf->hw.aq.asq_buf_size /
+                           sizeof(struct i40e_aqc_remove_macvlan_element_data);
+               del_list = kcalloc(filter_list_len,
+                           sizeof(struct i40e_aqc_remove_macvlan_element_data),
+                           GFP_KERNEL);
+               if (!del_list) {
+                       i40e_cleanup_add_list(&tmp_add_list);
+
+                       /* Undo VSI's MAC filter entry element updates */
+                       spin_lock_bh(&vsi->mac_filter_list_lock);
+                       i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+                       i40e_undo_add_filter_entries(vsi);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+                       return -ENOMEM;
+               }
+
+               list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
+                       cmd_flags = 0;
+
+                       /* add to delete list */
+                       ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
+                       del_list[num_del].vlan_tag =
+                               CPU_TO_LE16((u16)(f->vlan ==
+                                           I40E_VLAN_ANY ? 0 : f->vlan));
+
+                       cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+                       del_list[num_del].flags = cmd_flags;
+                       num_del++;
+
+                       /* flush a full buffer */
+                       if (num_del == filter_list_len) {
+                               ret = i40e_aq_remove_macvlan(&pf->hw,
+                                           vsi->seid, del_list, num_del,
+                                           NULL);
+                               aq_err = pf->hw.aq.asq_last_status;
+                               num_del = 0;
+                               memset(del_list, 0, filter_list_len *
+                                      sizeof(*del_list));
+
+                               if (ret && aq_err != I40E_AQ_RC_ENOENT)
+                                       dev_err(&pf->pdev->dev,
+                                                "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+                                                i40e_stat_str(&pf->hw, ret),
+                                                i40e_aq_str(&pf->hw, aq_err));
+                       }
+                       /* Release memory for MAC filter entries which were
+                        * synced up with HW.
+                        */
+                       list_del(&f->list);
+                       kfree(f);
+               }
+
+               if (num_del) {
+                       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+                                                    del_list, num_del, NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
+                       num_del = 0;
+
+                       if (ret && aq_err != I40E_AQ_RC_ENOENT)
+                               dev_info(&pf->pdev->dev,
+                                        "ignoring delete macvlan error, err %s aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw, aq_err));
+               }
+
+               kfree(del_list);
+               del_list = NULL;
+       }
+
+       if (!list_empty(&tmp_add_list)) {
+
+               /* do all the adds now */
+               filter_list_len = pf->hw.aq.asq_buf_size /
+                              sizeof(struct i40e_aqc_add_macvlan_element_data);
+               add_list = kcalloc(filter_list_len,
+                              sizeof(struct i40e_aqc_add_macvlan_element_data),
+                              GFP_KERNEL);
+               if (!add_list) {
+                       /* Purge element from temporary lists */
+                       i40e_cleanup_add_list(&tmp_add_list);
+
+                       /* Undo add filter entries from VSI MAC filter list */
+                       spin_lock_bh(&vsi->mac_filter_list_lock);
+                       i40e_undo_add_filter_entries(vsi);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+                       return -ENOMEM;
+               }
+
+               list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
+
+                       add_happened = true;
+                       cmd_flags = 0;
+
+                       /* add to add array */
+                       ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
+                       add_list[num_add].vlan_tag =
+                               CPU_TO_LE16(
+                                (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
+                       add_list[num_add].queue_number = 0;
+
+                       cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+                       add_list[num_add].flags = CPU_TO_LE16(cmd_flags);
+                       num_add++;
+
+                       /* flush a full buffer */
+                       if (num_add == filter_list_len) {
+                               ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                         add_list, num_add,
+                                                         NULL);
+                               aq_err = pf->hw.aq.asq_last_status;
+                               num_add = 0;
+
+                               if (ret)
+                                       break;
+                               memset(add_list, 0, filter_list_len *
+                                      sizeof(*add_list));
+                       }
+                       /* Entries from tmp_add_list were cloned from MAC
+                        * filter list, hence clean those cloned entries
+                        */
+                       list_del(&f->list);
+                       kfree(f);
+               }
+
+               if (num_add) {
+                       ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                 add_list, num_add, NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
+                       num_add = 0;
+               }
+               kfree(add_list);
+               add_list = NULL;
+
+               if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
+                       dev_info(&pf->pdev->dev,
+                                "add filter failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw, aq_err));
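+                       /* out of filter space: force promiscuous mode so
+                        * traffic is not silently dropped, and latch the
+                        * overflow in vsi->state
+                        */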
+                       if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
+                           !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+                                     &vsi->state)) {
+                               promisc_forced_on = true;
+                               set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+                                       &vsi->state);
+                               dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
+                       }
+               }
+       }
+
+       /* check for changes in promiscuous modes */
+       if (changed_flags & IFF_ALLMULTI) {
+               bool cur_multipromisc;
+
+               cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
+               ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+                                                              vsi->seid,
+                                                              cur_multipromisc,
+                                                              NULL);
+               if (ret)
+                       dev_info(&pf->pdev->dev,
+                                "set multi promisc failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
+       }
+       if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
+               bool cur_promisc;
+
+               cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
+                              test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+                                       &vsi->state));
+               if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
+                       /* set defport ON for the Main VSI instead of true
+                        * promisc; this way we will get all unicast/multicast
+                        * and VLAN promisc behavior but will not get VF or
+                        * VMDq traffic replicated on the Main VSI.
+                        */
+                       if (pf->cur_promisc != cur_promisc) {
+                               pf->cur_promisc = cur_promisc;
+                               set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+                       }
+               } else {
+                       ret = i40e_aq_set_vsi_unicast_promiscuous(
+                                                            &vsi->back->hw,
+                                                            vsi->seid,
+                                                            cur_promisc, NULL);
+                       if (ret)
+                               dev_info(&pf->pdev->dev,
+                                        "set unicast promisc failed, err %s, aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                        pf->hw.aq.asq_last_status));
+                       ret = i40e_aq_set_vsi_multicast_promiscuous(
+                                                                &vsi->back->hw,
+                                                                vsi->seid,
+                                                                cur_promisc,
+                                                                NULL);
+                       if (ret)
+                               dev_info(&pf->pdev->dev,
+                                        "set multicast promisc failed, err %s, aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                        pf->hw.aq.asq_last_status));
+               }
+               ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+                                                  vsi->seid,
+                                                  cur_promisc, NULL);
+               if (ret)
+                       dev_info(&pf->pdev->dev,
+                                "set brdcast promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
+       }
+
+       clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+       return 0;
+}
+
+/**
+ * i40e_sync_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+{
+       int v;
+
+       if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
+               return;
+
+       pf->flags &= ~I40E_FLAG_FILTER_SYNC;
+
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+               if (pf->vsi[v] &&
+                   (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
+                       int ret = i40e_sync_vsi_filters(pf->vsi[v], true);
+                       if (ret) {
+                               /* come back and try again later */
+                               pf->flags |= I40E_FLAG_FILTER_SYNC;
+                               break;
+                       }
+               }
+       }
+}
+
+/**
+ * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+       struct i40e_vsi *vsi = np->vsi;
+
+       /* MTU < 68 is an error and causes problems on some kernels */
+       if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+               return -EINVAL;
+
+       netdev_info(netdev, "changing MTU from %d to %d\n",
+                   netdev->mtu, new_mtu);
+       netdev->mtu = new_mtu;
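+       /* bounce a running interface so ring setup (i40e_vsi_configure_rx())
+        * can re-derive buffer sizes from the new frame size
+        */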
+       if (netif_running(netdev))
+               i40e_vsi_reinit_locked(vsi);
+       return 0;
+}
+
+#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL)
+/**
+ * i40e_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ **/
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+#ifdef HAVE_PTP_1588_CLOCK
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+
+#endif /* HAVE_PTP_1588_CLOCK */
+       switch (cmd) {
+#ifdef HAVE_PTP_1588_CLOCK
+#ifdef SIOCGHWTSTAMP
+       case SIOCGHWTSTAMP:
+               return i40e_ptp_get_ts_config(pf, ifr);
+#endif
+       case SIOCSHWTSTAMP:
+               return i40e_ptp_set_ts_config(pf, ifr);
+#endif /* HAVE_PTP_1588_CLOCK */
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+#endif
+/**
+ * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+{
+       struct i40e_vsi_context ctxt;
+       i40e_status ret;
+
+       if ((vsi->info.valid_sections &
+            cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+           ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
+               return;  /* already enabled */
+
+       vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+       vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+                                   I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+
+       ctxt.seid = vsi->seid;
+       ctxt.info = vsi->info;
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
+               dev_info(&vsi->back->pdev->dev,
+                        "update vlan stripping failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                     vsi->back->hw.aq.asq_last_status));
+       }
+}
+
+/**
+ * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+{
+       struct i40e_vsi_context ctxt;
+       i40e_status ret;
+
+       if ((vsi->info.valid_sections &
+            cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+           ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
+            I40E_AQ_VSI_PVLAN_EMOD_MASK))
+               return;  /* already disabled */
+
+       vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+       vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+                                   I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+       ctxt.seid = vsi->seid;
+       ctxt.info = vsi->info;
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
+               dev_info(&vsi->back->pdev->dev,
+                        "update vlan stripping failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                     vsi->back->hw.aq.asq_last_status));
+       }
+}
+
+#ifdef HAVE_VLAN_RX_REGISTER
+/**
+ * i40e_vlan_rx_register - Setup or shutdown vlan offload
+ * @netdev: network interface to be adjusted
+ * @grp: new vlan group list, NULL if disabling
+ **/
+static void i40e_vlan_rx_register(struct net_device *netdev,
+                                 struct vlan_group *grp)
+#else /* HAVE_VLAN_RX_REGISTER */
+/**
+ * i40e_vlan_rx_register - Setup or shutdown vlan offload
+ * @netdev: network interface to be adjusted
+ * @features: netdev features to test if VLAN offload is enabled or not
+ **/
+static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
+#endif /* HAVE_VLAN_RX_REGISTER */
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+#ifdef HAVE_VLAN_RX_REGISTER
+       bool enable;
+
+       vsi->vlgrp = grp;
+       enable = (grp || (vsi->back->flags & I40E_FLAG_DCB_ENABLED));
+       if (enable)
+               i40e_vlan_stripping_enable(vsi);
+#else /* HAVE_VLAN_RX_REGISTER */
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               i40e_vlan_stripping_enable(vsi);
+#else
+       if (features & NETIF_F_HW_VLAN_RX)
+               i40e_vlan_stripping_enable(vsi);
+#endif
+#endif /* HAVE_VLAN_RX_REGISTER */
+       else
+               i40e_vlan_stripping_disable(vsi);
+}
+
+/**
+ * i40e_vsi_add_vlan - Add vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be added (0 = untagged only, -1 = any)
+ **/
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+       struct i40e_mac_filter *f, *add_f;
+       bool is_netdev, is_vf;
+
+       is_vf = (vsi->type == I40E_VSI_SRIOV);
+       is_netdev = !!(vsi->netdev);
+
+       /* Locked once because all functions invoked below iterate the list */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
+       if (is_netdev) {
+               add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
+                                       is_vf, is_netdev);
+               if (!add_f) {
+                       dev_info(&vsi->back->pdev->dev,
+                                "Could not add vlan filter %d for %pM\n",
+                                vid, vsi->netdev->dev_addr);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       return -ENOMEM;
+               }
+       }
+
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+               if (!add_f) {
+                       dev_info(&vsi->back->pdev->dev,
+                                "Could not add vlan filter %d for %pM\n",
+                                vid, f->macaddr);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       return -ENOMEM;
+               }
+       }
+
+       /* Now if we add a vlan tag, make sure to check if it is the first
+        * tag (i.e. a "tag" of -1 exists) and if so replace the -1 "tag"
+        * with 0, so we now accept untagged and the specified tagged traffic
+        * (and not any tagged and untagged)
+        */
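+       /* e.g. if only the I40E_VLAN_ANY (-1) filter exists, adding vid 100
+        * leaves filters for vid 0 (untagged) and vid 100
+        */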
+       if (vid > 0) {
+               if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
+                                                 I40E_VLAN_ANY,
+                                                 is_vf, is_netdev)) {
+                       i40e_del_filter(vsi, vsi->netdev->dev_addr,
+                                       I40E_VLAN_ANY, is_vf, is_netdev);
+                       add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
+                                               is_vf, is_netdev);
+                       if (!add_f) {
+                               dev_info(&vsi->back->pdev->dev,
+                                        "Could not add filter 0 for %pM\n",
+                                        vsi->netdev->dev_addr);
+                               spin_unlock_bh(&vsi->mac_filter_list_lock);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
+       if (vid > 0 && !vsi->info.pvid) {
+               list_for_each_entry(f, &vsi->mac_filter_list, list) {
+                       if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+                                            is_vf, is_netdev))
+                               continue;
+                       i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+                                       is_vf, is_netdev);
+                       add_f = i40e_add_filter(vsi, f->macaddr,
+                                               0, is_vf, is_netdev);
+                       if (!add_f) {
+                               dev_info(&vsi->back->pdev->dev,
+                                       "Could not add filter 0 for %pM\n",
+                                       f->macaddr);
+                               spin_unlock_bh(&vsi->mac_filter_list_lock);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       /* Make sure to release before sync_vsi_filter because that
+        * function will lock/unlock as necessary
+        */
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+           test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+               return 0;
+
+       return i40e_sync_vsi_filters(vsi, false);
+}
+
+/**
+ * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be removed (0 = untagged only, -1 = any)
+ *
+ * Return: 0 on success or negative otherwise
+ **/
+int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+       struct net_device *netdev = vsi->netdev;
+       struct i40e_mac_filter *f, *add_f;
+       bool is_vf, is_netdev;
+       int filter_count = 0;
+
+       is_vf = (vsi->type == I40E_VSI_SRIOV);
+       is_netdev = !!(netdev);
+
+       /* Locked once because all functions invoked below iterate the list */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
+       if (is_netdev)
+               i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
+
+       list_for_each_entry(f, &vsi->mac_filter_list, list)
+               i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+
+       /* go through all the filters for this VSI and if there is only
+        * vid == 0 it means there are no other filters, so vid 0 must
+        * be replaced with -1. This signifies that we should from now
+        * on accept any traffic (with any tag present, or untagged)
+        */
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               if (is_netdev) {
+                       if (f->vlan &&
+                           ether_addr_equal(netdev->dev_addr, f->macaddr))
+                               filter_count++;
+               }
+
+               if (f->vlan)
+                       filter_count++;
+       }
+
+       if (!filter_count && is_netdev) {
+               i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
+               f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
+                                   is_vf, is_netdev);
+               if (!f) {
+                       dev_info(&vsi->back->pdev->dev,
+                                "Could not add filter %d for %pM\n",
+                                I40E_VLAN_ANY, netdev->dev_addr);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       return -ENOMEM;
+               }
+       }
+
+       if (!filter_count) {
+               list_for_each_entry(f, &vsi->mac_filter_list, list) {
+                       i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
+                       add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+                                               is_vf, is_netdev);
+                       if (!add_f) {
+                               dev_info(&vsi->back->pdev->dev,
+                                        "Could not add filter %d for %pM\n",
+                                        I40E_VLAN_ANY, f->macaddr);
+                               spin_unlock_bh(&vsi->mac_filter_list_lock);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       /* Make sure to release the lock before sync_vsi_filter because that
+        * function will lock/unlock as necessary
+        */
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+           test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+               return 0;
+
+       return i40e_sync_vsi_filters(vsi, false);
+}
+
+/**
+ * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * @netdev: network interface to be adjusted
+ * @vid: vlan id to be added
+ *
+ * net_device_ops implementation for adding vlan ids
+ **/
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+#ifdef I40E_FCOE
+int i40e_vlan_rx_add_vid(struct net_device *netdev,
+                        __always_unused __be16 proto, u16 vid)
+#else
+static int i40e_vlan_rx_add_vid(struct net_device *netdev,
+                               __always_unused __be16 proto, u16 vid)
+#endif
+#else
+#ifdef I40E_FCOE
+int i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#else
+static int i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif
+#endif
+#else
+#ifdef I40E_FCOE
+void i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#else
+static void i40e_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       int ret = 0;
+
+       if (vid > 4095)
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+               return -EINVAL;
+#else
+               return;
+#endif
+
+       netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
+
+       /* If the network stack called us with vid = 0 then
+        * it is asking to receive priority tagged packets with
+        * vlan id 0.  Our HW receives them by default when configured
+        * to receive untagged packets so there is no need to add an
+        * extra filter for vlan 0 tagged packets.
+        */
+       if (vid)
+               ret = i40e_vsi_add_vlan(vsi, vid);
+
+#ifndef HAVE_VLAN_RX_REGISTER
+       if (!ret && (vid < VLAN_N_VID))
+               set_bit(vid, vsi->active_vlans);
+#endif /* HAVE_VLAN_RX_REGISTER */
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+
+       /* Copy feature flags from netdev to the vlan netdev for this vid.
+        * This allows things like TSO to bubble down to our vlan device.
+        * Some vlans, such as VLAN 0 for DCB, will not have a v_netdev, so
+        * we will not have a netdev that needs updating.
+        */
+       if (vsi->vlgrp) {
+               struct vlan_group *vlgrp = vsi->vlgrp;
+               struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
+               if (v_netdev) {
+                       v_netdev->features |= netdev->features;
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
+                       v_netdev->enc_features |= netdev->enc_features;
+#endif
+                       vlan_group_set_device(vlgrp, vid, v_netdev);
+               }
+       }
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+       return ret;
+#endif
+}
+
+/**
+ * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * @netdev: network interface to be adjusted
+ * @vid: vlan id to be removed
+ *
+ * net_device_ops implementation for removing vlan ids
+ **/
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+#ifdef I40E_FCOE
+int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+                         __always_unused __be16 proto, u16 vid)
+#else
+static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+                                __always_unused __be16 proto, u16 vid)
+#endif
+#else
+#ifdef I40E_FCOE
+int i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#else
+static int i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif
+#endif
+#else
+#ifdef I40E_FCOE
+void i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#else
+static void i40e_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+
+       netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
+
+       /* return code is ignored as there is nothing a user
+        * can do about failure to remove and a log message was
+        * already printed from the other function
+        */
+       i40e_vsi_kill_vlan(vsi, vid);
+#ifndef HAVE_VLAN_RX_REGISTER
+
+       clear_bit(vid, vsi->active_vlans);
+#endif /* HAVE_VLAN_RX_REGISTER */
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+
+       return 0;
+#endif
+}
+
+/**
+ * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
+ * @vsi: the vsi being brought back up
+ **/
+static void i40e_restore_vlan(struct i40e_vsi *vsi)
+{
+       u16 vid;
+
+       if (!vsi->netdev)
+               return;
+
+#ifdef HAVE_VLAN_RX_REGISTER
+       i40e_vlan_rx_register(vsi->netdev, vsi->vlgrp);
+
+       if (vsi->vlgrp) {
+               for (vid = 0; vid < VLAN_N_VID; vid++) {
+                       if (!vlan_group_get_device(vsi->vlgrp, vid))
+                               continue;
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+                       i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
+                                            vid);
+#else
+                       i40e_vlan_rx_add_vid(vsi->netdev, vid);
+#endif
+               }
+       }
+#else /* HAVE_VLAN_RX_REGISTER */
+       i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
+
+       for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+               i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
+                                    vid);
+#else
+               i40e_vlan_rx_add_vid(vsi->netdev, vid);
+#endif
+#endif
+}
+
+/**
+ * i40e_vsi_add_pvid - Add pvid for the VSI
+ * @vsi: the vsi being adjusted
+ * @vid: the vlan id to set as a PVID
+ **/
+int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+{
+       struct i40e_vsi_context ctxt;
+       i40e_status ret;
+
+       vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+       vsi->info.pvid = cpu_to_le16(vid);
+       vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+                                   I40E_AQ_VSI_PVLAN_INSERT_PVID |
+                                   I40E_AQ_VSI_PVLAN_EMOD_STR;
+
+       ctxt.seid = vsi->seid;
+       ctxt.info = vsi->info;
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
+               dev_info(&vsi->back->pdev->dev,
+                        "add pvid failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                     vsi->back->hw.aq.asq_last_status));
+               return -ENOENT;
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_remove_pvid - Remove the pvid from the VSI
+ * @vsi: the vsi being adjusted
+ *
+ * Disable VLAN stripping and clear the pvid to put the VSI back to normal
+ **/
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
+{
+       i40e_vlan_stripping_disable(vsi);
+
+       vsi->info.pvid = 0;
+}
+#ifdef I40E_ADD_CLOUD_FILTER_OFFLOAD
+
+/**
+ * i40e_add_del_cloud_filter - Add/del cloud filter
+ * @pf: pointer to the physical function struct
+ * @filter: cloud filter rule
+ * @vsi: pointer to the destination vsi
+ * @add: if true, add, if false, delete
+ *
+ * Add or delete a cloud filter for a specific flow spec.
+ * Returns 0 if the filter was successfully added or deleted.
+ **/
+int i40e_add_del_cloud_filter(struct i40e_pf *pf,
+                             struct i40e_cloud_filter *filter,
+                             struct i40e_vsi *vsi, bool add)
+{
+       struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+       u32 ipaddr;
+       int ret;
+       static const u16 flag_table[128] = {
+               [I40E_CLOUD_FILTER_FLAGS_OMAC]  =
+                       I40E_AQC_ADD_CLOUD_FILTER_OMAC,
+               [I40E_CLOUD_FILTER_FLAGS_IMAC]  =
+                       I40E_AQC_ADD_CLOUD_FILTER_IMAC,
+               [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
+                       I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
+               [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
+                       I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
+               [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
+                       I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
+               [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
+                       I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
+               [I40E_CLOUD_FILTER_FLAGS_IIP] =
+                       I40E_AQC_ADD_CLOUD_FILTER_IIP,
+       };
+
+       if (vsi == NULL)
+               return I40E_ERR_BAD_PTR;
+
+       if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_XVLAN)
+               return I40E_ERR_NOT_IMPLEMENTED;
+
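+       /* flag_table is sparse: unsupported flag combinations map to 0, so
+        * the range check and the zero check together reject them
+        */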
+       if ((filter->flags >= ARRAY_SIZE(flag_table)) ||
+           (flag_table[filter->flags] == 0))
+               return I40E_ERR_CONFIG;
+
+       memset(&cld_filter, 0, sizeof(cld_filter));
+       ether_addr_copy(cld_filter.outer_mac, filter->outer_mac);
+       ether_addr_copy(cld_filter.inner_mac, filter->inner_mac);
+
+       /* the low bytes of the stored IP address correspond to the last
+        * bytes on the wire
+        */
+       ipaddr = ntohl(filter->inner_ip[0]);
+       memcpy(&cld_filter.ipaddr.v4.data, &ipaddr, 4);
+       cld_filter.inner_vlan = cpu_to_le16(ntohs(filter->inner_vlan));
+       cld_filter.tenant_id = cpu_to_le32(filter->tenant_id);
+       cld_filter.queue_number = cpu_to_le16(filter->queue_id);
+
+       /* Only supports VXLAN tunnel for now */
+       cld_filter.flags = cpu_to_le16(
+                               I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN <<
+                               I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
+
+       if (filter->flags != I40E_CLOUD_FILTER_FLAGS_OMAC)
+               cld_filter.flags |=
+                       cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE);
+
+       cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
+                       I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
+
+       if (add)
+               ret = i40e_aq_add_cloud_filters(&pf->hw, vsi->seid,
+                                               &cld_filter, 1);
+       else
+               ret = i40e_aq_remove_cloud_filters(&pf->hw, vsi->seid,
+                                               &cld_filter, 1);
+       if (ret)
+               dev_err(&pf->pdev->dev,
+                       "fail to %s cloud filter, err %d aq_err %d\n",
+                       add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
+       return ret;
+}
+#endif /* I40E_ADD_CLOUD_FILTER_OFFLOAD */
+
+/**
+ * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
+{
+       int i, err = 0;
+
+       for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+               err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
+
+       return err;
+}
+
+/**
+ * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free VSI's transmit software resources
+ **/
+static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
+{
+       int i;
+
+       if (!vsi->tx_rings)
+               return;
+
+       for (i = 0; i < vsi->num_queue_pairs; i++)
+               if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+                       i40e_free_tx_resources(vsi->tx_rings[i]);
+}
+
+/**
+ * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
+{
+       int i, err = 0;
+
+       for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+               err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
+#ifdef I40E_FCOE
+       i40e_fcoe_setup_ddp_resources(vsi);
+#endif
+       return err;
+}
+
+/**
+ * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free all receive software resources
+ **/
+static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
+{
+       int i;
+
+       if (!vsi->rx_rings)
+               return;
+
+       for (i = 0; i < vsi->num_queue_pairs; i++)
+               if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
+                       i40e_free_rx_resources(vsi->rx_rings[i]);
+#ifdef I40E_FCOE
+       i40e_fcoe_free_ddp_resources(vsi);
+#endif
+}
+
+/**
+ * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
+ * @ring: The Tx ring to configure
+ *
+ * This enables/disables XPS for a given Tx descriptor ring
+ * based on the TCs enabled for the VSI that ring belongs to.
+ **/
+static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
+{
+       struct i40e_vsi *vsi = ring->vsi;
+       cpumask_var_t mask;
+
+       if (!ring->q_vector || !ring->netdev)
+               return;
+
+       /* In single TC mode, enable XPS */
+       if (vsi->tc_config.numtc <= 1) {
+               if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
+                       netif_set_xps_queue(ring->netdev,
+                                           &ring->q_vector->affinity_mask,
+                                           ring->queue_index);
+       } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+               /* Disable XPS to allow selection based on TC */
+               bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
+               netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
+               free_cpumask_var(mask);
+       }
+}
+
+/**
+ * i40e_configure_tx_ring - Configure a transmit ring context and related state
+ * @ring: The Tx ring to configure
+ *
+ * Configure the Tx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_tx_ring(struct i40e_ring *ring)
+{
+       struct i40e_vsi *vsi = ring->vsi;
+       u16 pf_q = vsi->base_queue + ring->queue_index;
+       struct i40e_hw *hw = &vsi->back->hw;
+       struct i40e_hmc_obj_txq tx_ctx;
+       i40e_status err = 0;
+       u32 qtx_ctl = 0;
+
+       /* some ATR related tx ring init */
+       if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
+               ring->atr_sample_rate = vsi->back->atr_sample_rate;
+               ring->atr_count = 0;
+       } else {
+               ring->atr_sample_rate = 0;
+       }
+       /* configure XPS */
+       i40e_config_xps_tx_ring(ring);
+
+       /* clear the context structure first */
+       memset(&tx_ctx, 0, sizeof(tx_ctx));
+
+       tx_ctx.new_context = 1;
+       tx_ctx.base = (ring->dma / 128);
+       tx_ctx.qlen = ring->count;
+       tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
+                                              I40E_FLAG_FD_ATR_ENABLED));
+#ifdef I40E_FCOE
+       tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
+#endif
+#ifdef HAVE_PTP_1588_CLOCK
+       tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+#endif /* HAVE_PTP_1588_CLOCK */
+       /* FDIR VSI tx ring can still use RS bit and writebacks */
+       if (vsi->type != I40E_VSI_FDIR)
+               tx_ctx.head_wb_ena = 1;
+       tx_ctx.head_wb_addr = ring->dma +
+                             (ring->count * sizeof(struct i40e_tx_desc));
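+       /* the head writeback area is assumed to sit just past the last
+        * descriptor in the ring's DMA region
+        */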
+
+       /* As part of VSI creation/update, FW allocates certain
+        * Tx arbitration queue sets for each TC enabled for
+        * the VSI. The FW returns the handles to these queue
+        * sets as part of the response buffer to Add VSI,
+        * Update VSI, etc. AQ commands. It is expected that
+        * these queue set handles be associated with the Tx
+        * queues by the driver as part of the TX queue context
+        * initialization. This has to be done regardless of
+        * DCB as by default everything is mapped to TC0.
+        */
+       tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
+       tx_ctx.rdylist_act = 0;
+
+       /* clear the context in the HMC */
+       err = i40e_clear_lan_tx_queue_context(hw, pf_q);
+       if (err) {
+               dev_info(&vsi->back->pdev->dev,
+                        "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
+                        ring->queue_index, pf_q, err);
+               return -ENOMEM;
+       }
+
+       /* set the context in the HMC */
+       err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
+       if (err) {
+               dev_info(&vsi->back->pdev->dev,
+                        "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
+                        ring->queue_index, pf_q, err);
+               return -ENOMEM;
+       }
+
+       /* Now associate this queue with this PCI function */
+       if (vsi->type == I40E_VSI_VMDQ2) {
+               qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+               qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+                          I40E_QTX_CTL_VFVM_INDX_MASK;
+       } else {
+               qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+       }
+
+       qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+                   I40E_QTX_CTL_PF_INDX_MASK);
+       wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
+       i40e_flush(hw);
+
+       clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
+
+       /* cache the tail offset for easier writes later */
+       ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
+
+       return 0;
+}
+
+/**
+ * i40e_configure_rx_ring - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_rx_ring(struct i40e_ring *ring)
+{
+       struct i40e_vsi *vsi = ring->vsi;
+       u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
+       u16 pf_q = vsi->base_queue + ring->queue_index;
+       struct i40e_hw *hw = &vsi->back->hw;
+       struct i40e_hmc_obj_rxq rx_ctx;
+       i40e_status err = 0;
+
+       ring->state = 0;
+
+       /* clear the context structure first */
+       memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+       ring->rx_buf_len = vsi->rx_buf_len;
+       ring->rx_hdr_len = vsi->rx_hdr_len;
+
+       rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+       rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
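+       /* the queue context stores the ring base address in 128-byte units */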
+       rx_ctx.base = (ring->dma / 128);
+       rx_ctx.qlen = ring->count;
+
+       if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
+               set_ring_16byte_desc_enabled(ring);
+               rx_ctx.dsize = 0;
+       } else {
+               rx_ctx.dsize = 1;
+       }
+
+       rx_ctx.dtype = vsi->dtype;
+       if (vsi->dtype) {
+               set_ring_ps_enabled(ring);
+               rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
+                                 I40E_RX_SPLIT_IP      |
+                                 I40E_RX_SPLIT_TCP_UDP |
+                                 I40E_RX_SPLIT_SCTP;
+       } else {
+               rx_ctx.hsplit_0 = 0;
+       }
+
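+       /* cap the receive max at what one full buffer chain can hold */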
+       rx_ctx.rxmax = min_t(u16, vsi->max_frame,
+                                 (chain_len * ring->rx_buf_len));
+       rx_ctx.lrxqthresh = 2;
+       rx_ctx.crcstrip = 1;
+       rx_ctx.l2tsel = 1;
+       /* this controls whether VLAN is stripped from inner headers */
+       rx_ctx.showiv = 0;
+#ifdef I40E_FCOE
+       rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
+#endif
+       /* set the prefena field to 1 because the manual says to */
+       rx_ctx.prefena = 1;
+
+       /* clear the context in the HMC */
+       err = i40e_clear_lan_rx_queue_context(hw, pf_q);
+       if (err) {
+               dev_info(&vsi->back->pdev->dev,
+                        "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+                        ring->queue_index, pf_q, err);
+               return -ENOMEM;
+       }
+
+       /* set the context in the HMC */
+       err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
+       if (err) {
+               dev_info(&vsi->back->pdev->dev,
+                        "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+                        ring->queue_index, pf_q, err);
+               return -ENOMEM;
+       }
+
+       /* cache tail for quicker writes, and clear the reg before use */
+       ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
+       writel(0, ring->tail);
+
+       if (ring_is_ps_enabled(ring)) {
+               i40e_alloc_rx_headers(ring);
+               i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
+       } else {
+               i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_configure_tx - Configure the VSI for Tx
+ * @vsi: VSI structure describing this set of rings and resources
+ *
+ * Configure the Tx VSI for operation.
+ **/
+static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
+{
+       int err = 0;
+       u16 i;
+
+       for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+               err = i40e_configure_tx_ring(vsi->tx_rings[i]);
+
+       return err;
+}
+
+/**
+ * i40e_vsi_configure_rx - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Rx VSI for operation.
+ **/
+static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
+{
+       int err = 0;
+       u16 i;
+
+       if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
+               vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
+                              + ETH_FCS_LEN + VLAN_HLEN;
+       else
+               vsi->max_frame = I40E_RXBUFFER_2048;
+
+       /* figure out correct receive buffer length */
+       switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
+                                   I40E_FLAG_RX_PS_ENABLED)) {
+       case I40E_FLAG_RX_1BUF_ENABLED:
+               vsi->rx_hdr_len = 0;
+               vsi->rx_buf_len = vsi->max_frame;
+               vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
+               break;
+       case I40E_FLAG_RX_PS_ENABLED:
+               vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
+               vsi->rx_buf_len = I40E_RXBUFFER_2048;
+               vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
+               break;
+       default:
+               vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
+               vsi->rx_buf_len = I40E_RXBUFFER_2048;
+               vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
+               break;
+       }
+
+#ifdef I40E_FCOE
+       /* setup rx buffer for FCoE */
+       if ((vsi->type == I40E_VSI_FCOE) &&
+           (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
+               vsi->rx_hdr_len = 0;
+               vsi->rx_buf_len = I40E_RXBUFFER_3072;
+               vsi->max_frame = I40E_RXBUFFER_3072;
+               vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
+       }
+
+#endif /* I40E_FCOE */
+       /* round up for the chip's needs */
+       vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
+                               BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
+       vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
+                               BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
+
+       /* set up individual rings */
+       for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+               err = i40e_configure_rx_ring(vsi->rx_rings[i]);
+
+       return err;
+}
+
+/**
+ * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
+{
+       struct i40e_ring *tx_ring, *rx_ring;
+       u16 qoffset, qcount;
+       int i, n;
+
+       if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+               /* Reset the TC information */
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       rx_ring = vsi->rx_rings[i];
+                       tx_ring = vsi->tx_rings[i];
+                       rx_ring->dcb_tc = 0;
+                       tx_ring->dcb_tc = 0;
+               }
+       }
+
+       for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
+               if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
+                       continue;
+
+               qoffset = vsi->tc_config.tc_info[n].qoffset;
+               qcount = vsi->tc_config.tc_info[n].qcount;
+               for (i = qoffset; i < (qoffset + qcount); i++) {
+                       rx_ring = vsi->rx_rings[i];
+                       tx_ring = vsi->tx_rings[i];
+                       rx_ring->dcb_tc = n;
+                       tx_ring->dcb_tc = n;
+               }
+       }
+}
+
+/**
+ * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
+{
+       if (vsi->netdev)
+               i40e_set_rx_mode(vsi->netdev);
+}
+
+/**
+ * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
+ * @vsi: Pointer to the targeted VSI
+ *
+ * This function replays onto the hardware the hlist in which all the
+ * SB Flow Director filters were saved.
+ **/
+static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
+{
+       struct i40e_fdir_filter *filter;
+       struct i40e_pf *pf = vsi->back;
+       struct hlist_node *node;
+
+       if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+               return;
+
+       hlist_for_each_entry_safe(filter, node,
+                                 &pf->fdir_filter_list, fdir_node) {
+               i40e_add_del_fdir(vsi, filter, true);
+       }
+}
+
+/**
+ * i40e_vsi_configure - Set up the VSI for action
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_configure(struct i40e_vsi *vsi)
+{
+       int err;
+
+       i40e_set_vsi_rx_mode(vsi);
+       i40e_restore_vlan(vsi);
+       i40e_vsi_config_dcb_rings(vsi);
+       err = i40e_vsi_configure_tx(vsi);
+       if (!err)
+               err = i40e_vsi_configure_rx(vsi);
+
+       return err;
+}
+
+/**
+ * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u16 vector;
+       int i, q;
+       u32 qp;
+
+       /* The interrupt indexing is offset by 1 in the PFINT_ITRn
+        * and PFINT_LNKLSTn registers, e.g.:
+        *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
+        */
+       qp = vsi->base_queue;
+       vector = vsi->base_vector;
+       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               struct i40e_q_vector *q_vector = vsi->q_vectors[i];
+
+               q_vector->itr_countdown = ITR_COUNTDOWN_START;
+               q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+               q_vector->rx.latency_range = I40E_LOW_LATENCY;
+               wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
+                    q_vector->rx.itr);
+               q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+               q_vector->tx.latency_range = I40E_LOW_LATENCY;
+               wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
+                    q_vector->tx.itr);
+               wr32(hw, I40E_PFINT_RATEN(vector - 1),
+                    INTRL_USEC_TO_REG(vsi->int_rate_limit));
+
+               /* Linked list for the queuepairs assigned to this vector */
+               wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
+               for (q = 0; q < q_vector->num_ringpairs; q++) {
+                       u32 val;
+
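+                       /* each Rx cause points at its paired Tx queue and
+                        * each Tx cause at the next Rx queue, chaining the
+                        * vector's queue pairs into one linked list
+                        */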
+                       val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+                             (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
+                             (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+                             (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
+                             (I40E_QUEUE_TYPE_TX
+                                     << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+
+                       wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+                       val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+                             (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
+                             (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+                             ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
+                             (I40E_QUEUE_TYPE_RX
+                                     << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+                       /* Terminate the linked list */
+                       if (q == (q_vector->num_ringpairs - 1))
+                               val |= (I40E_QUEUE_END_OF_LIST
+                                          << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+                       wr32(hw, I40E_QINT_TQCTL(qp), val);
+                       qp++;
+               }
+       }
+
+       i40e_flush(hw);
+}
+
+/**
+ * i40e_enable_misc_int_causes - enable the non-queue interrupts
+ * @pf: board private structure
+ **/
+static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       /* clear things first */
+       wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
+       rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
+
+       val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
+             I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
+             I40E_PFINT_ICR0_ENA_GRST_MASK          |
+             I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
+             I40E_PFINT_ICR0_ENA_GPIO_MASK          |
+             I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
+             I40E_PFINT_ICR0_ENA_VFLR_MASK          |
+             I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+#ifdef HAVE_PTP_1588_CLOCK
+
+       if (pf->flags & I40E_FLAG_PTP)
+               val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+#endif /* HAVE_PTP_1588_CLOCK */
+
+       wr32(hw, I40E_PFINT_ICR0_ENA, val);
+
+       /* SW_ITR_IDX = 0, but don't change INTENA */
+       wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+                                       I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
+
+       /* OTHER_ITR_IDX = 0 */
+       wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+}
+
+/**
+ * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
+{
+       struct i40e_q_vector *q_vector = vsi->q_vectors[0];
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       /* set the ITR configuration */
+       q_vector->itr_countdown = ITR_COUNTDOWN_START;
+       q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+       q_vector->rx.latency_range = I40E_LOW_LATENCY;
+       wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
+       q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+       q_vector->tx.latency_range = I40E_LOW_LATENCY;
+       wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
+
+       i40e_enable_misc_int_causes(pf);
+
+       /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+       wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+       /* Associate the queue pair to the vector and enable the queue int */
+       val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
+             (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+             (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+
+       wr32(hw, I40E_QINT_RQCTL(0), val);
+
+       val = I40E_QINT_TQCTL_CAUSE_ENA_MASK                  |
+             (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+             (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+       wr32(hw, I40E_QINT_TQCTL(0), val);
+       i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+
+       wr32(hw, I40E_PFINT_DYN_CTL0,
+            I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+       i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
+             I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+             (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+
+       wr32(hw, I40E_PFINT_DYN_CTL0, val);
+       i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_disable - Disable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: disable a particular Hw Interrupt vector
+ **/
+void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+       wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+       i40e_flush(hw);
+}
+
+/**
+ * i40e_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
+{
+       struct i40e_q_vector *q_vector = data;
+
+       if (!q_vector->tx.ring && !q_vector->rx.ring)
+               return IRQ_HANDLED;
+
+       napi_schedule(&q_vector->napi);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ *
+ * Requests interrupts from the kernel for the VSI's allocated MSI-X vectors.
+ **/
+int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+{
+       int q_vectors = vsi->num_q_vectors;
+       struct i40e_pf *pf = vsi->back;
+       int base = vsi->base_vector;
+       int rx_int_idx = 0;
+       int tx_int_idx = 0;
+       int vector, err;
+
+       for (vector = 0; vector < q_vectors; vector++) {
+               struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
+
+               if (q_vector->tx.ring && q_vector->rx.ring) {
+                       snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+                                "%s-%s-%d", basename, "TxRx", rx_int_idx++);
+                       tx_int_idx++;
+               } else if (q_vector->rx.ring) {
+                       snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+                                "%s-%s-%d", basename, "rx", rx_int_idx++);
+               } else if (q_vector->tx.ring) {
+                       snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+                                "%s-%s-%d", basename, "tx", tx_int_idx++);
+               } else {
+                       /* skip this unused q_vector */
+                       continue;
+               }
+               err = request_irq(pf->msix_entries[base + vector].vector,
+                                 vsi->irq_handler,
+                                 0,
+                                 q_vector->name,
+                                 q_vector);
+               if (err) {
+                       dev_info(&pf->pdev->dev,
+                                "MSIX request_irq failed, error: %d\n", err);
+                       goto free_queue_irqs;
+               }
+#ifdef HAVE_IRQ_AFFINITY_HINT
+               /* assign the mask for this irq */
+               irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
+                                     &q_vector->affinity_mask);
+#endif /* HAVE_IRQ_AFFINITY_HINT */
+       }
+
+       vsi->irqs_ready = true;
+       return 0;
+
+free_queue_irqs:
+       while (vector) {
+               vector--;
+#ifdef HAVE_IRQ_AFFINITY_HINT
+               irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
+                                     NULL);
+#endif
+               free_irq(pf->msix_entries[base + vector].vector,
+                        &(vsi->q_vectors[vector]));
+       }
+       return err;
+}
+
+/**
+ * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ **/
+static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       int base = vsi->base_vector;
+       int i;
+
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+               wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
+       }
+
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+               for (i = vsi->base_vector;
+                    i < (vsi->num_q_vectors + vsi->base_vector); i++)
+                       wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
+
+               i40e_flush(hw);
+               for (i = 0; i < vsi->num_q_vectors; i++)
+                       synchronize_irq(pf->msix_entries[i + base].vector);
+       } else {
+               /* Legacy and MSI mode - this stops all interrupt handling */
+               wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+               wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+               i40e_flush(hw);
+               synchronize_irq(pf->pdev->irq);
+       }
+}
+
+/**
+ * i40e_vsi_enable_irq - Enable IRQ for the given VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       int i;
+
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+               for (i = 0; i < vsi->num_q_vectors; i++)
+                       i40e_irq_dynamic_enable(vsi, i);
+       } else {
+               i40e_irq_dynamic_enable_icr0(pf);
+       }
+
+       i40e_flush(&pf->hw);
+       return 0;
+}
+
+/**
+ * i40e_stop_misc_vector - Stop the vector that handles non-queue events
+ * @pf: board private structure
+ **/
+static void i40e_stop_misc_vector(struct i40e_pf *pf)
+{
+       /* Disable ICR 0 */
+       wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
+       i40e_flush(&pf->hw);
+}
+
+/**
+ * i40e_intr - MSI/Legacy and non-queue interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ *
+ * This is the handler used for all MSI/Legacy interrupts, and deals
+ * with both queue and non-queue interrupts.  This is also used in
+ * MSIX mode to handle the non-queue interrupts.
+ **/
+static irqreturn_t i40e_intr(int irq, void *data)
+{
+       struct i40e_pf *pf = (struct i40e_pf *)data;
+       struct i40e_hw *hw = &pf->hw;
+       irqreturn_t ret = IRQ_NONE;
+       u32 icr0, icr0_remaining;
+       u32 val, ena_mask;
+
+       icr0 = rd32(hw, I40E_PFINT_ICR0);
+       ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
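+
+       /* Causes deferred to the service task are cleared from ena_mask
+        * below so they stay disabled until serviced; whatever remains is
+        * re-enabled when ena_mask is written back at enable_intr.
+        */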
+
+       /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+       if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+               goto enable_intr;
+
+       /* if interrupt but no bits showing, must be SWINT */
+       if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
+           (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
+               pf->sw_int_count++;
+
+       /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
+       if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+
+               /* temporarily disable queue cause for NAPI processing */
+               u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+
+               qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+               wr32(hw, I40E_QINT_RQCTL(0), qval);
+
+               qval = rd32(hw, I40E_QINT_TQCTL(0));
+               qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+               wr32(hw, I40E_QINT_TQCTL(0), qval);
+
+               if (!test_bit(__I40E_DOWN, &pf->state))
+                       napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
+       }
+
+       if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+               ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+               set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+       }
+
+       if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+               ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+               set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+       }
+
+       if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+               ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+               set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+       }
+
+       if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
+               if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+                       set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+               ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
+               val = rd32(hw, I40E_GLGEN_RSTAT);
+               val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
+                      >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+               if (val == I40E_RESET_CORER) {
+                       pf->corer_count++;
+               } else if (val == I40E_RESET_GLOBR) {
+                       pf->globr_count++;
+               } else if (val == I40E_RESET_EMPR) {
+                       pf->empr_count++;
+                       set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
+               }
+       }
+
+       if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+               icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
+               dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+               dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
+                        rd32(hw, I40E_PFHMC_ERRORINFO),
+                        rd32(hw, I40E_PFHMC_ERRORDATA));
+       }
+
+#ifdef HAVE_PTP_1588_CLOCK
+       if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
+               u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
+
+               if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
+                       icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+                       i40e_ptp_tx_hwtstamp(pf);
+               }
+       }
+
+#endif /* HAVE_PTP_1588_CLOCK */
+       /* If a critical error is pending we have no choice but to reset the
+        * device.
+        * Report and mask out any remaining unexpected interrupts.
+        */
+       icr0_remaining = icr0 & ena_mask;
+       if (icr0_remaining) {
+               dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
+                        icr0_remaining);
+               if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+                   (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
+                   (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
+                       dev_info(&pf->pdev->dev, "device will be reset\n");
+                       set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+                       i40e_service_event_schedule(pf);
+               }
+               ena_mask &= ~icr0_remaining;
+       }
+       ret = IRQ_HANDLED;
+
+enable_intr:
+       /* re-enable interrupt causes */
+       wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
+       if (!test_bit(__I40E_DOWN, &pf->state)) {
+               i40e_service_event_schedule(pf);
+               i40e_irq_dynamic_enable_icr0(pf);
+       }
+
+       return ret;
+}
+
+/**
+ * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring:  tx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+       struct i40e_vsi *vsi = tx_ring->vsi;
+       u16 i = tx_ring->next_to_clean;
+       struct i40e_tx_buffer *tx_buf;
+       struct i40e_tx_desc *tx_desc;
+
+       tx_buf = &tx_ring->tx_bi[i];
+       tx_desc = I40E_TX_DESC(tx_ring, i);
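+       /* track the position as a negative offset from the ring end so the
+        * wrap test below is a cheap "!i"; the true index is restored by
+        * adding tx_ring->count back after the loop
+        */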
+       i -= tx_ring->count;
+
+       do {
+               struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+               /* if next_to_watch is not set then there is no work pending */
+               if (!eop_desc)
+                       break;
+
+               /* prevent any other reads prior to eop_desc */
+               read_barrier_depends();
+
+               /* if the descriptor isn't done, no work yet to do */
+               if (!(eop_desc->cmd_type_offset_bsz &
+                     cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+                       break;
+
+               /* clear next_to_watch to prevent false hangs */
+               tx_buf->next_to_watch = NULL;
+
+               tx_desc->buffer_addr = 0;
+               tx_desc->cmd_type_offset_bsz = 0;
+               /* move past filter desc */
+               tx_buf++;
+               tx_desc++;
+               i++;
+               if (unlikely(!i)) {
+                       i -= tx_ring->count;
+                       tx_buf = tx_ring->tx_bi;
+                       tx_desc = I40E_TX_DESC(tx_ring, 0);
+               }
+               /* unmap skb header data */
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buf, dma),
+                                dma_unmap_len(tx_buf, len),
+                                DMA_TO_DEVICE);
+               if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
+                       kfree(tx_buf->raw_buf);
+
+               tx_buf->raw_buf = NULL;
+               tx_buf->tx_flags = 0;
+               tx_buf->next_to_watch = NULL;
+               dma_unmap_len_set(tx_buf, len, 0);
+               tx_desc->buffer_addr = 0;
+               tx_desc->cmd_type_offset_bsz = 0;
+
+               /* move us past the eop_desc for start of next FD desc */
+               tx_buf++;
+               tx_desc++;
+               i++;
+               if (unlikely(!i)) {
+                       i -= tx_ring->count;
+                       tx_buf = tx_ring->tx_bi;
+                       tx_desc = I40E_TX_DESC(tx_ring, 0);
+               }
+
+               /* update budget accounting */
+               budget--;
+       } while (likely(budget));
+
+       i += tx_ring->count;
+       tx_ring->next_to_clean = i;
+
+       if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
+               i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
+
+       return budget > 0;
+}
+
+/**
+ * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
+{
+       struct i40e_q_vector *q_vector = data;
+       struct i40e_vsi *vsi;
+
+       if (!q_vector->tx.ring)
+               return IRQ_HANDLED;
+
+       vsi = q_vector->tx.ring->vsi;
+       i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
+ * @vsi: the VSI being configured
+ * @v_idx: vector index
+ * @qp_idx: queue pair index
+ **/
+static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
+{
+       struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+       struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+       struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
+
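+       /* push the rings onto the head of this vector's Tx/Rx ring lists
+        * and bump the per-vector ring counts
+        */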
+       tx_ring->q_vector = q_vector;
+       tx_ring->next = q_vector->tx.ring;
+       q_vector->tx.ring = tx_ring;
+       q_vector->tx.count++;
+
+       rx_ring->q_vector = q_vector;
+       rx_ring->next = q_vector->rx.ring;
+       q_vector->rx.ring = rx_ring;
+       q_vector->rx.count++;
+}
+
+/**
+ * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code.  Ideally, we'd have
+ * one vector per queue pair, but on a constrained vector budget, we
+ * group the queue pairs as "efficiently" as possible.
+ **/
+static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
+{
+       int qp_remaining = vsi->num_queue_pairs;
+       int q_vectors = vsi->num_q_vectors;
+       int num_ringpairs;
+       int v_start = 0;
+       int qp_idx = 0;
+
+       /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+        * group them so there are multiple queues per vector.
+        * It is also important to go through all the vectors available to be
+        * sure that if we don't use all the vectors, that the remaining vectors
+        * are cleared. This is especially important when decreasing the
+        * number of queues in use.
+        */
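+       /* e.g. 10 queue pairs on 4 vectors split 3/3/2/2:
+        * DIV_ROUND_UP(10,4)=3, DIV_ROUND_UP(7,3)=3,
+        * DIV_ROUND_UP(4,2)=2, DIV_ROUND_UP(2,1)=2
+        */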
+       for (; v_start < q_vectors; v_start++) {
+               struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+               num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+               q_vector->num_ringpairs = num_ringpairs;
+
+               q_vector->rx.count = 0;
+               q_vector->tx.count = 0;
+               q_vector->rx.ring = NULL;
+               q_vector->tx.ring = NULL;
+
+               while (num_ringpairs--) {
+                       i40e_map_vector_to_qp(vsi, v_start, qp_idx);
+                       qp_idx++;
+                       qp_remaining--;
+               }
+       }
+}
+
+/**
+ * i40e_vsi_request_irq - Request IRQ from the OS
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ **/
+static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
+{
+       struct i40e_pf *pf = vsi->back;
+       int err;
+
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+               err = i40e_vsi_request_irq_msix(vsi, basename);
+       else if (pf->flags & I40E_FLAG_MSI_ENABLED)
+               err = request_irq(pf->pdev->irq, i40e_intr, 0,
+                                 pf->int_name, pf);
+       else
+               err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
+                                 pf->int_name, pf);
+
+       if (err)
+               dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
+
+       return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * i40e_netpoll - A Polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts.  It's not called while the normal interrupt routine is executing.
+ **/
+#ifdef I40E_FCOE
+void i40e_netpoll(struct net_device *netdev)
+#else
+static void i40e_netpoll(struct net_device *netdev)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       int i;
+
+       /* if interface is down do nothing */
+       if (test_bit(__I40E_DOWN, &vsi->state))
+               return;
+
+       pf->flags |= I40E_FLAG_IN_NETPOLL;
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+               for (i = 0; i < vsi->num_q_vectors; i++)
+                       i40e_msix_clean_rings(0, vsi->q_vectors[i]);
+       } else {
+               i40e_intr(pf->pdev->irq, netdev);
+       }
+       pf->flags &= ~I40E_FLAG_IN_NETPOLL;
+}
+#endif
+
+/**
+ * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @enable: enable or disable state of the queue
+ *
+ * This routine will wait for the given Tx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries; otherwise returns 0 on success.
+ **/
+static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
+{
+       int i;
+       u32 tx_reg;
+
+       for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
+               tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
+               if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+                       break;
+
+               usleep_range(10, 20);
+       }
+       if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
+ * @vsi: the VSI being configured
+ * @enable: start or stop the rings
+ **/
+static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       int i, j, pf_q, ret = 0;
+       u32 tx_reg;
+
+       pf_q = vsi->base_queue;
+       for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+
+               /* warn the TX unit of coming changes */
+               i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+               if (!enable)
+                       usleep_range(10, 20);
+
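+               /* wait for any in-flight enable/disable request to settle
+                * (QENA_REQ == QENA_STAT) before toggling the queue
+                */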
+               for (j = 0; j < 50; j++) {
+                       tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+                       if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+                           ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+                               break;
+                       usleep_range(1000, 2000);
+               }
+               /* Skip if the queue is already in the requested state */
+               if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+                       continue;
+
+               /* turn on/off the queue */
+               if (enable) {
+                       wr32(hw, I40E_QTX_HEAD(pf_q), 0);
+                       tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+               } else {
+                       tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+               }
+
+               wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+               /* No waiting for the Tx queue to disable */
+               if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
+                       continue;
+
+               /* wait for the change to finish */
+               ret = i40e_pf_txq_wait(pf, pf_q, enable);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "VSI seid %d Tx ring %d %sable timeout\n",
+                                vsi->seid, pf_q, (enable ? "en" : "dis"));
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+/**
+ * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @enable: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries; otherwise returns 0 on success.
+ **/
+static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
+{
+       int i;
+       u32 rx_reg;
+
+       for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
+               rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
+               if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+                       break;
+
+               usleep_range(10, 20);
+       }
+       if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
+ * @vsi: the VSI being configured
+ * @enable: start or stop the rings
+ **/
+static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       int i, j, pf_q, ret = 0;
+       u32 rx_reg;
+
+       pf_q = vsi->base_queue;
+       for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
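+               /* wait for any in-flight enable/disable request to settle
+                * (QENA_REQ == QENA_STAT) before toggling the queue
+                */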
+               for (j = 0; j < 50; j++) {
+                       rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+                       if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+                           ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+                               break;
+                       usleep_range(1000, 2000);
+               }
+
+               /* Skip if the queue is already in the requested state */
+               if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+                       continue;
+
+               /* turn on/off the queue */
+               if (enable)
+                       rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+               else
+                       rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+               wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+
+               /* wait for the change to finish */
+               ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "VSI seid %d Rx ring %d %sable timeout\n",
+                                vsi->seid, pf_q, (enable ? "en" : "dis"));
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+/**
+ * i40e_vsi_control_rings - Start or stop a VSI's rings
+ * @vsi: the VSI being configured
+ * @request: start or stop the rings
+ **/
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+{
+       int ret = 0;
+
+       /* do rx first for enable and last for disable */
+       if (request) {
+               ret = i40e_vsi_control_rx(vsi, request);
+               if (ret)
+                       return ret;
+               ret = i40e_vsi_control_tx(vsi, request);
+       } else {
+               /* Ignore return value, we need to shutdown whatever we can */
+               i40e_vsi_control_tx(vsi, request);
+               i40e_vsi_control_rx(vsi, request);
+       }
+
+       return ret;
+}
+
+/**
+ * i40e_vsi_free_irq - Free the irq association with the OS
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       int base = vsi->base_vector;
+       u32 val, qp;
+       int i;
+
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+               if (!vsi->q_vectors)
+                       return;
+
+               if (!vsi->irqs_ready)
+                       return;
+
+               vsi->irqs_ready = false;
+               for (i = 0; i < vsi->num_q_vectors; i++) {
+                       u16 vector = i + base;
+
+                       /* free only the irqs that were actually requested */
+                       if (!vsi->q_vectors[i] ||
+                           !vsi->q_vectors[i]->num_ringpairs)
+                               continue;
+
+#ifdef HAVE_IRQ_AFFINITY_HINT
+                       /* clear the affinity_mask in the IRQ descriptor */
+                       irq_set_affinity_hint(pf->msix_entries[vector].vector,
+                                             NULL);
+#endif
+                       free_irq(pf->msix_entries[vector].vector,
+                                vsi->q_vectors[i]);
+
+                       /* Tear down the interrupt queue link list
+                        *
+                        * We know that they come in pairs and always
+                        * the Rx first, then the Tx.  To clear the
+                        * link list, stick the EOL value into the
+                        * next_q field of the registers.
+                        */
+                       val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
+                       qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+                               >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+                       val |= I40E_QUEUE_END_OF_LIST
+                               << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+                       wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
+
+                       while (qp != I40E_QUEUE_END_OF_LIST) {
+                               u32 next;
+
+                               val = rd32(hw, I40E_QINT_RQCTL(qp));
+
+                               val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
+                                        I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+                                        I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
+                                        I40E_QINT_RQCTL_INTEVENT_MASK);
+
+                               val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+                                        I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+                               wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+                               val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+                               next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
+                                       >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
+
+                               val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
+                                        I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+                                        I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
+                                        I40E_QINT_TQCTL_INTEVENT_MASK);
+
+                               val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+                                        I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+                               wr32(hw, I40E_QINT_TQCTL(qp), val);
+                               qp = next;
+                       }
+               }
+       } else {
+               free_irq(pf->pdev->irq, pf);
+
+               val = rd32(hw, I40E_PFINT_LNKLST0);
+               qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+                       >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+               val |= I40E_QUEUE_END_OF_LIST
+                       << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+               wr32(hw, I40E_PFINT_LNKLST0, val);
+
+               val = rd32(hw, I40E_QINT_RQCTL(qp));
+               val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
+                        I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+                        I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
+                        I40E_QINT_RQCTL_INTEVENT_MASK);
+
+               val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+                       I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+               wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+               val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+               val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
+                        I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+                        I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
+                        I40E_QINT_TQCTL_INTEVENT_MASK);
+
+               val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+                       I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+               wr32(hw, I40E_QINT_TQCTL(qp), val);
+       }
+}
+
+/**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+       struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+       struct i40e_ring *ring;
+
+       if (!q_vector)
+               return;
+
+       /* disassociate q_vector from rings */
+       i40e_for_each_ring(ring, q_vector->tx)
+               ring->q_vector = NULL;
+
+       i40e_for_each_ring(ring, q_vector->rx)
+               ring->q_vector = NULL;
+
+       /* only VSI w/ an associated netdev is set up w/ NAPI */
+       if (vsi->netdev) {
+               napi_hash_del(&q_vector->napi);
+               netif_napi_del(&q_vector->napi);
+       }
+
+       vsi->q_vectors[v_idx] = NULL;
+
+       kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI being un-configured
+ *
+ * This frees the memory allocated to the q_vectors and
+ * deletes references to the NAPI struct.
+ **/
+static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
+{
+       int v_idx;
+
+       for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+               i40e_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * i40e_reset_interrupt_capability - Disable interrupt setup in OS
+ * @pf: board private structure
+ **/
+static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
+{
+       /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+               pci_disable_msix(pf->pdev);
+               kfree(pf->msix_entries);
+               pf->msix_entries = NULL;
+               kfree(pf->irq_pile);
+               pf->irq_pile = NULL;
+       } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
+               pci_disable_msi(pf->pdev);
+       }
+       pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+}
+
+/**
+ * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @pf: board private structure
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
+{
+       int i;
+
+       i40e_stop_misc_vector(pf);
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+               synchronize_irq(pf->msix_entries[0].vector);
+               free_irq(pf->msix_entries[0].vector, pf);
+       }
+
+       i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
+       for (i = 0; i < pf->num_alloc_vsi; i++)
+               if (pf->vsi[i])
+                       i40e_vsi_free_q_vectors(pf->vsi[i]);
+       i40e_reset_interrupt_capability(pf);
+}
+
+/**
+ * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_enable_all(struct i40e_vsi *vsi)
+{
+       int q_idx;
+
+       if (!vsi->netdev)
+               return;
+
+       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+               i40e_qv_init_lock(vsi->q_vectors[q_idx]);
+               napi_enable(&vsi->q_vectors[q_idx]->napi);
+       }
+}
+
+/**
+ * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_disable_all(struct i40e_vsi *vsi)
+{
+       int limiter = 20;
+       int q_idx;
+
+       if (!vsi->netdev)
+               return;
+
+       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+               napi_disable(&vsi->q_vectors[q_idx]->napi);
+               while (!i40e_qv_disable(vsi->q_vectors[q_idx]) && limiter--)
+                       usleep_range(1000, 2000);
+               if (!limiter)
+                       dev_info(&vsi->back->pdev->dev,
+                                "QV %d locked\n", q_idx);
+       }
+}
+
+/**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the vsi to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+       if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+               i40e_down(vsi);
+       i40e_vsi_free_irq(vsi);
+       i40e_vsi_free_tx_resources(vsi);
+       i40e_vsi_free_rx_resources(vsi);
+       vsi->current_netdev_flags = 0;
+}
+
+/**
+ * i40e_quiesce_vsi - Pause a given VSI
+ * @vsi: the VSI being paused
+ **/
+void i40e_quiesce_vsi(struct i40e_vsi *vsi)
+{
+       if (test_bit(__I40E_DOWN, &vsi->state))
+               return;
+
+       /* No need to disable FCoE VSI when Tx suspended */
+       if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
+            vsi->type == I40E_VSI_FCOE) {
+               dev_dbg(&vsi->back->pdev->dev,
+                        "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
+               return;
+       }
+
+       set_bit(__I40E_NEEDS_RESTART, &vsi->state);
+       if (vsi->netdev && netif_running(vsi->netdev))
+#ifdef HAVE_NET_DEVICE_OPS
+               vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+#else /* HAVE_NET_DEVICE_OPS */
+               vsi->netdev->stop(vsi->netdev);
+#endif /* HAVE_NET_DEVICE_OPS */
+       else
+               i40e_vsi_close(vsi);
+}
+
+/**
+ * i40e_unquiesce_vsi - Resume a given VSI
+ * @vsi: the VSI being resumed
+ **/
+void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
+{
+       if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
+               return;
+
+       clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+       if (vsi->netdev && netif_running(vsi->netdev))
+#ifdef HAVE_NET_DEVICE_OPS
+               vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+#else /* HAVE_NET_DEVICE_OPS */
+               vsi->netdev->open(vsi->netdev);
+#endif /* HAVE_NET_DEVICE_OPS */
+       else
+               i40e_vsi_open(vsi);   /* this clears the DOWN bit */
+}
+
+/**
+ * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ **/
+void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
+{
+       int v;
+
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+               if (pf->vsi[v])
+                       i40e_quiesce_vsi(pf->vsi[v]);
+       }
+}
+
+/**
+ * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
+ * @pf: the PF
+ **/
+void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
+{
+       int v;
+
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+               if (pf->vsi[v])
+                       i40e_unquiesce_vsi(pf->vsi[v]);
+       }
+}
+
+#ifdef CONFIG_DCB
+/**
+ * i40e_vsi_wait_txq_disabled - Wait for a VSI's Tx queues to be disabled
+ * @vsi: the VSI being configured
+ *
+ * This function waits for the given VSI's Tx queues to be disabled.
+ **/
+static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       int i, pf_q, ret;
+
+       pf_q = vsi->base_queue;
+       for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+               /* Check and wait for the disable status of the queue */
+               ret = i40e_pf_txq_wait(pf, pf_q, false);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "VSI seid %d Tx ring %d disable timeout\n",
+                                vsi->seid, pf_q);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
+ * @pf: the PF
+ *
+ * This function waits for the Tx queues to be in disabled state for all the
+ * VSIs that are managed by this PF.
+ **/
+static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
+{
+       int v, ret = 0;
+
+       for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+               /* No need to wait for FCoE VSI queues */
+               if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
+                       ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
+                       if (ret)
+                               break;
+               }
+       }
+
+       return ret;
+}
+#endif /* CONFIG_DCB */
+
+/**
+ * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
+ * @pf: pointer to PF
+ *
+ * Get TC map for ISCSI PF type that will include iSCSI TC
+ * and LAN TC.
+ **/
+static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
+{
+       struct i40e_dcb_app_priority_table app;
+       struct i40e_hw *hw = &pf->hw;
+       u8 enabled_tc = 1; /* TC0 is always enabled */
+       u8 tc, i;
+       /* Get the iSCSI APP TLV */
+       struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+       for (i = 0; i < dcbcfg->numapps; i++) {
+               app = dcbcfg->app[i];
+               if (app.selector == I40E_APP_SEL_TCPIP &&
+                   app.protocolid == I40E_APP_PROTOID_ISCSI) {
+                       tc = dcbcfg->etscfg.prioritytable[app.priority];
+                       enabled_tc |= BIT_ULL(tc);
+                       break;
+               }
+       }
+
+       return enabled_tc;
+}
+
+/**
+ * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Return the number of TCs from given DCBx configuration
+ **/
+static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
+{
+       u8 num_tc = 0;
+       int i;
+
+       /* Scan the ETS Config Priority Table to find
+        * traffic class enabled for a given priority
+        * and use the traffic class index to get the
+        * number of traffic classes enabled
+        */
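+       /* e.g. a priority table of {0,0,1,1,2,2,3,3} has a highest TC
+        * index of 3, so num_tc is returned as 4
+        */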
+       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+               if (dcbcfg->etscfg.prioritytable[i] > num_tc)
+                       num_tc = dcbcfg->etscfg.prioritytable[i];
+       }
+
+       /* Traffic class index starts from zero so
+        * increment to return the actual count
+        */
+       return num_tc + 1;
+}
+
+/**
+ * i40e_dcb_get_enabled_tc - Get enabled traffic classes
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Query the current DCB configuration and return the number of
+ * traffic classes enabled from the given DCBX config
+ **/
+static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
+{
+       u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
+       u8 enabled_tc = 1;
+       u8 i;
+
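+       /* e.g. num_tc of 3 yields enabled_tc 0x7 (TC0..TC2) */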
+       for (i = 0; i < num_tc; i++)
+               enabled_tc |= BIT(i);
+
+       return enabled_tc;
+}
+
+/**
+ * i40e_pf_get_num_tc - Get the number of enabled traffic classes for PF
+ * @pf: PF being queried
+ *
+ * Return number of traffic classes enabled for the given PF
+ **/
+u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u8 i, enabled_tc;
+       u8 num_tc = 0;
+       struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+       /* If DCB is not enabled then always in single TC */
+       if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+               return 1;
+
+       /* SFP mode: the PF is enabled for all TCs on the port */
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               return i40e_dcb_get_num_tc(dcbcfg);
+
+       /* In MFP mode, return the count of enabled TCs for this PF */
+       if (pf->hw.func_caps.iscsi)
+               enabled_tc = i40e_get_iscsi_tc_map(pf);
+       else
+               return 1; /* Only TC0 */
+
+       /* At least have TC0 */
+       enabled_tc = (enabled_tc ? enabled_tc : 0x1);
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               if (enabled_tc & BIT_ULL(i))
+                       num_tc++;
+       }
+       return num_tc;
+}
+
+/**
+ * i40e_pf_get_default_tc - Get bitmap for first enabled TC
+ * @pf: PF being queried
+ *
+ * Return a bitmap for first enabled traffic class for this PF.
+ **/
+static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
+{
+       u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
+       u8 i = 0;
+
+       if (!enabled_tc)
+               return 0x1; /* TC0 */
+
+       /* Find the first enabled TC */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               if (enabled_tc & BIT_ULL(i))
+                       break;
+       }
+
+       return BIT(i);
+}
+
+/**
+ * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
+ * @pf: PF being queried
+ *
+ * Return a bitmap for enabled traffic classes for this PF.
+ **/
+static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
+{
+       /* If DCB is not enabled for this PF then just return default TC */
+       if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+               return i40e_pf_get_default_tc(pf);
+
+       /* In SFP mode we want the PF to be enabled for all TCs */
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
+
+       /* MFP enabled and iSCSI PF type */
+       if (pf->hw.func_caps.iscsi)
+               return i40e_get_iscsi_tc_map(pf);
+       else
+               return i40e_pf_get_default_tc(pf);
+}
+
+/**
+ * i40e_vsi_get_bw_info - Query VSI BW Information
+ * @vsi: the VSI being queried
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+{
+       struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
+       struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       i40e_status ret;
+       u32 tc_bw_max;
+       int i;
+
+       /* Get the VSI level BW configuration */
+       ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "couldn't get PF vsi bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               return -EINVAL;
+       }
+
+       /* Get the VSI level BW configuration per TC */
+       ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+                                                 NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               return -EINVAL;
+       }
+
+       if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
+               dev_info(&pf->pdev->dev,
+                        "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
+                        bw_config.tc_valid_bits,
+                        bw_ets_config.tc_valid_bits);
+               /* Still continuing */
+       }
+
+       vsi->bw_limit = LE16_TO_CPU(bw_config.port_bw_limit);
+       vsi->bw_max_quanta = bw_config.max_bw;
+       tc_bw_max = LE16_TO_CPU(bw_ets_config.tc_bw_max[0]) |
+                   (LE16_TO_CPU(bw_ets_config.tc_bw_max[1]) << 16);
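+       /* tc_bw_max now packs eight 4-bit fields, one per TC */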
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
+               vsi->bw_ets_limit_credits[i] =
+                                       LE16_TO_CPU(bw_ets_config.credits[i]);
+               /* 3 bits out of 4 for each TC */
+               vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC bitmap
+ * @bw_share: BW shared credits per TC
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
+                                      u8 *bw_share)
+{
+       struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+       i40e_status ret;
+       int i;
+
+       bw_data.tc_valid_bits = enabled_tc;
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               bw_data.tc_bw_credits[i] = bw_share[i];
+
+       ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+                                         NULL);
+       if (ret) {
+               dev_info(&vsi->back->pdev->dev,
+                        "AQ command Config VSI BW allocation per TC failed = %d\n",
+                        vsi->back->hw.aq.asq_last_status);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+               vsi->info.qs_handle[i] = bw_data.qs_handles[i];
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC map to be enabled
+ **/
+static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+       struct net_device *netdev = vsi->netdev;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u8 netdev_tc = 0;
+       int i;
+       struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+       if (!netdev)
+               return;
+
+       if (!enabled_tc) {
+               netdev_reset_tc(netdev);
+               return;
+       }
+
+       /* Set up actual enabled TCs on the VSI */
+       if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
+               return;
+
+       /* set per TC queues for the VSI */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               /* Only set TC queues for enabled tcs
+                *
+                * e.g. for a VSI with TC0 and TC3 enabled, the
+                * enabled_tc bitmap is binary 1001 (0x9); the driver
+                * sets numtc for the netdev to 2, and the netdev
+                * layer references them as TC 0 and 1.
+                */
+               if (vsi->tc_config.enabled_tc & BIT_ULL(i))
+                       netdev_set_tc_queue(netdev,
+                                       vsi->tc_config.tc_info[i].netdev_tc,
+                                       vsi->tc_config.tc_info[i].qcount,
+                                       vsi->tc_config.tc_info[i].qoffset);
+       }
+
+       /* Assign UP2TC map for the VSI */
+       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+               /* Get the actual TC# for the UP */
+               u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
+               /* Get the mapped netdev TC# for the UP */
+               netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
+               netdev_set_prio_tc_map(netdev, i, netdev_tc);
+       }
+}
+
+/**
+ * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
+ * @vsi: the VSI being configured
+ * @ctxt: the ctxt buffer returned from AQ VSI update param command
+ **/
+static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
+                                     struct i40e_vsi_context *ctxt)
+{
+       /* copy just the sections touched, not the entire info,
+        * since not all sections are valid as returned by the
+        * update VSI params command
+        */
+       vsi->info.mapping_flags = ctxt->info.mapping_flags;
+       memcpy(&vsi->info.queue_mapping,
+              &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
+       memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
+              sizeof(vsi->info.tc_mapping));
+}
+
+/**
+ * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
+ * @vsi: VSI to be configured
+ * @enabled_tc: TC bitmap
+ *
+ * This configures a particular VSI for TCs that are mapped to the
+ * given TC bitmap. It uses default bandwidth share for TCs across
+ * VSIs to configure TC for a particular VSI.
+ *
+ * NOTE:
+ * It is expected that the VSI queues have been quiesced before calling
+ * this function.
+ **/
+int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+       u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+       struct i40e_vsi_context ctxt;
+       int ret = 0;
+       int i;
+
+       /* Check if enabled_tc is same as existing or new TCs */
+       if (vsi->tc_config.enabled_tc == enabled_tc)
+               return ret;
+
+       /* Enable ETS TCs with equal BW Share for now across all VSIs */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               if (enabled_tc & BIT_ULL(i))
+                       bw_share[i] = 1;
+       }
+
+       ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+       if (ret) {
+               dev_info(&vsi->back->pdev->dev,
+                        "Failed configuring TC map %d for VSI %d\n",
+                        enabled_tc, vsi->seid);
+               goto out;
+       }
+
+       /* Update Queue Pairs Mapping for currently enabled UPs */
+       ctxt.seid = vsi->seid;
+       ctxt.pf_num = vsi->back->hw.pf_id;
+       ctxt.vf_num = 0;
+       ctxt.uplink_seid = vsi->uplink_seid;
+       ctxt.info = vsi->info;
+       i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+
+       /* Update the VSI after updating the VSI queue-mapping information */
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
+               dev_info(&vsi->back->pdev->dev,
+                        "Update vsi tc config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                     vsi->back->hw.aq.asq_last_status));
+               goto out;
+       }
+       /* update the local VSI info with updated queue map */
+       i40e_vsi_update_queue_map(vsi, &ctxt);
+       vsi->info.valid_sections = 0;
+
+       /* Update current VSI BW information */
+       ret = i40e_vsi_get_bw_info(vsi);
+       if (ret) {
+               dev_info(&vsi->back->pdev->dev,
+                        "Failed updating vsi bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                     vsi->back->hw.aq.asq_last_status));
+               goto out;
+       }
+
+       /* Update the netdev TC setup */
+       i40e_vsi_config_netdev_tc(vsi, enabled_tc);
+out:
+       return ret;
+}
+
+/**
+ * i40e_veb_config_tc - Configure TCs for given VEB
+ * @veb: given VEB
+ * @enabled_tc: TC bitmap
+ *
+ * Configures given TC bitmap for VEB (switching) element
+ **/
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+{
+       struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
+       struct i40e_pf *pf = veb->pf;
+       int ret = 0;
+       int i;
+
+       /* No TCs or already enabled TCs just return */
+       if (!enabled_tc || veb->enabled_tc == enabled_tc)
+               return ret;
+
+       bw_data.tc_valid_bits = enabled_tc;
+       /* bw_data.absolute_credits is not set (relative) */
+
+       /* Enable ETS TCs with equal BW Share for now */
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               if (enabled_tc & BIT_ULL(i))
+                       bw_data.tc_bw_share_credits[i] = 1;
+       }
+
+       ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
+                                                  &bw_data, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "VEB bw config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               goto out;
+       }
+
+       /* Update the BW information */
+       ret = i40e_veb_get_bw_info(veb);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Failed getting veb bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+       }
+
+out:
+       return ret;
+}
+
+#ifdef CONFIG_DCB
+/**
+ * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
+ * @pf: PF struct
+ *
+ * Reconfigure VEB/VSIs on a given PF; it is assumed that
+ * the caller has quiesced all the VSIs before calling
+ * this function
+ **/
+static void i40e_dcb_reconfigure(struct i40e_pf *pf)
+{
+       u8 tc_map = 0;
+       int ret;
+       u8 v;
+
+       /* Enable the TCs available on PF to all VEBs */
+       tc_map = i40e_pf_get_tc_map(pf);
+       for (v = 0; v < I40E_MAX_VEB; v++) {
+               if (!pf->veb[v])
+                       continue;
+               ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Failed configuring TC for VEB seid=%d\n",
+                                pf->veb[v]->seid);
+                       /* Will try to configure as many components as possible */
+               }
+       }
+
+       /* Update each VSI */
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+               if (!pf->vsi[v])
+                       continue;
+
+               /* - Enable all TCs for the LAN VSI
+#ifdef I40E_FCOE
+                * - For FCoE VSI only enable the TC configured
+                *   as per the APP TLV
+#endif
+                * - For all others keep them at TC0 for now
+                */
+               if (v == pf->lan_vsi)
+                       tc_map = i40e_pf_get_tc_map(pf);
+               else
+                       tc_map = i40e_pf_get_default_tc(pf);
+#ifdef I40E_FCOE
+               if (pf->vsi[v]->type == I40E_VSI_FCOE)
+                       tc_map = i40e_get_fcoe_tc_map(pf);
+#endif /* #ifdef I40E_FCOE */
+
+               ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Failed configuring TC for VSI seid=%d\n",
+                                pf->vsi[v]->seid);
+                       /* Will try to configure as many components as possible */
+               } else {
+                       /* Re-configure VSI vectors based on updated TC map */
+                       i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
+#ifdef HAVE_DCBNL_IEEE
+                       if (pf->vsi[v]->netdev)
+                               i40e_dcbnl_set_all(pf->vsi[v]);
+#endif /* HAVE_DCBNL_IEEE */
+               }
+       }
+}
+
+/**
+ * i40e_resume_port_tx - Resume port Tx
+ * @pf: PF struct
+ *
+ * Resume a port's Tx and issue a PF reset in case of failure to
+ * resume.
+ **/
+static int i40e_resume_port_tx(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       int ret;
+
+       ret = i40e_aq_resume_port_tx(hw, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Resume Port Tx failed, err %s aq_err %s\n",
+                         i40e_stat_str(&pf->hw, ret),
+                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               /* Schedule PF reset to recover */
+               set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+               i40e_service_event_schedule(pf);
+       }
+
+       return ret;
+}
+
+/**
+ * i40e_init_pf_dcb - Initialize DCB configuration
+ * @pf: PF being configured
+ *
+ * Query the current DCB configuration and cache it
+ * in the hardware structure
+ **/
+static int i40e_init_pf_dcb(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       int err = 0;
+
+       /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4))
+               goto out;
+
+       /* Get the initial DCB configuration */
+       err = i40e_init_dcb(hw);
+       if (!err) {
+               /* Device/Function is not DCBX capable */
+               if ((!hw->func_caps.dcb) ||
+                   (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
+                       dev_info(&pf->pdev->dev,
+                                "DCBX offload is not supported or is disabled for this PF.\n");
+
+                       if (pf->flags & I40E_FLAG_MFP_ENABLED)
+                               goto out;
+               } else {
+                       /* When not DISABLED, DCBX is managed by the FW */
+                       pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+                                      DCB_CAP_DCBX_VER_IEEE;
+
+                       pf->flags |= I40E_FLAG_DCB_CAPABLE;
+                       /* Enable DCB tagging only when more than one TC */
+                       if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+                               pf->flags |= I40E_FLAG_DCB_ENABLED;
+                       dev_dbg(&pf->pdev->dev,
+                                "DCBX offload is supported for this PF.\n");
+               }
+       } else {
+               dev_info(&pf->pdev->dev,
+                        "Query for DCB configuration failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+       }
+
+out:
+       return err;
+}
+
+#endif /* CONFIG_DCB */
+#define SPEED_SIZE 14
+#define FC_SIZE 8
+/**
+ * i40e_print_link_message - print link up or down
+ * @vsi: the VSI for which link needs a message
+ * @isup: true for link up, false for link down
+ **/
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+{
+       char *speed = "Unknown";
+       char *fc = "Unknown";
+
+       if (vsi->current_isup == isup)
+               return;
+
+       vsi->current_isup = isup;
+
+       if (!isup) {
+               netdev_info(vsi->netdev, "NIC Link is Down\n");
+               return;
+       }
+
+       /* Warn user if the link speed on an NPAR enabled partition is
+        * less than 10Gbps
+        */
+       if (vsi->back->hw.func_caps.npar_enable &&
+           (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
+            vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
+               netdev_warn(vsi->netdev,
+                           "The partition detected link speed that is less than 10Gbps\n");
+
+       switch (vsi->back->hw.phy.link_info.link_speed) {
+       case I40E_LINK_SPEED_40GB:
+               speed = "40 G";
+               break;
+       case I40E_LINK_SPEED_20GB:
+               speed = "20 G";
+               break;
+       case I40E_LINK_SPEED_10GB:
+               speed = "10 G";
+               break;
+       case I40E_LINK_SPEED_1GB:
+               speed = "1000 M";
+               break;
+       case I40E_LINK_SPEED_100MB:
+               speed = "100 M";
+               break;
+       default:
+               break;
+       }
+
+       switch (vsi->back->hw.fc.current_mode) {
+       case I40E_FC_FULL:
+               fc = "RX/TX";
+               break;
+       case I40E_FC_TX_PAUSE:
+               fc = "TX";
+               break;
+       case I40E_FC_RX_PAUSE:
+               fc = "RX";
+               break;
+       default:
+               fc = "None";
+               break;
+       }
+
+       netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
+                   speed, fc);
+}
+
+/**
+ * i40e_up_complete - Finish the last steps of bringing up a connection
+ * @vsi: the VSI being configured
+ **/
+static int i40e_up_complete(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       int err;
+
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+               i40e_vsi_configure_msix(vsi);
+       else
+               i40e_configure_msi_and_legacy(vsi);
+
+       /* start rings */
+       err = i40e_vsi_control_rings(vsi, true);
+       if (err)
+               return err;
+
+       clear_bit(__I40E_DOWN, &vsi->state);
+       i40e_napi_enable_all(vsi);
+       i40e_vsi_enable_irq(vsi);
+
+       if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
+           (vsi->netdev)) {
+               i40e_print_link_message(vsi, true);
+               netif_tx_start_all_queues(vsi->netdev);
+               netif_carrier_on(vsi->netdev);
+       } else if (vsi->netdev) {
+               i40e_print_link_message(vsi, false);
+               /* need to check for qualified module here */
+               if ((pf->hw.phy.link_info.link_info &
+                       I40E_AQ_MEDIA_AVAILABLE) &&
+                   (!(pf->hw.phy.link_info.an_info &
+                       I40E_AQ_QUALIFIED_MODULE)))
+                       netdev_err(vsi->netdev,
+                               "the driver failed to link because an unqualified module was detected.");
+       }
+
+       /* replay FDIR SB filters */
+       if (vsi->type == I40E_VSI_FDIR) {
+               /* reset fd counters */
+               pf->fd_add_err = pf->fd_atr_cnt = 0;
+               if (pf->fd_tcp_rule > 0) {
+                       pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+                       pf->fd_tcp_rule = 0;
+               }
+               i40e_fdir_filter_restore(vsi);
+       }
+       i40e_service_event_schedule(pf);
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_reinit_locked - Reset the VSI
+ * @vsi: the VSI being configured
+ *
+ * Rebuild the ring structs after some configuration
+ * has changed, e.g. MTU size.
+ **/
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+
+       WARN_ON(in_interrupt());
+       while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+               usleep_range(1000, 2000);
+       i40e_down(vsi);
+
+       /* Give a VF some time to respond to the reset.  The
+        * two second wait is based upon the watchdog cycle in
+        * the VF driver.
+        */
+       if (vsi->type == I40E_VSI_SRIOV)
+               msleep(2000);
+       i40e_up(vsi);
+       clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+}
+
+/**
+ * i40e_up - Bring the connection back up after being down
+ * @vsi: the VSI being configured
+ **/
+int i40e_up(struct i40e_vsi *vsi)
+{
+       int err;
+
+       err = i40e_vsi_configure(vsi);
+       if (!err)
+               err = i40e_up_complete(vsi);
+
+       return err;
+}
+
+/**
+ * i40e_down - Shutdown the connection processing
+ * @vsi: the VSI being stopped
+ **/
+void i40e_down(struct i40e_vsi *vsi)
+{
+       int i;
+
+       /* It is assumed that the caller of this function
+        * sets the vsi->state __I40E_DOWN bit.
+        */
+       if (vsi->netdev) {
+               netif_carrier_off(vsi->netdev);
+               netif_tx_disable(vsi->netdev);
+       }
+       i40e_vsi_disable_irq(vsi);
+       i40e_vsi_control_rings(vsi, false);
+       i40e_napi_disable_all(vsi);
+
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               i40e_clean_tx_ring(vsi->tx_rings[i]);
+               i40e_clean_rx_ring(vsi->rx_rings[i]);
+       }
+}
+
+#ifdef HAVE_SETUP_TC
+/**
+ * i40e_setup_tc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ **/
+#ifdef I40E_FCOE
+int i40e_setup_tc(struct net_device *netdev, u8 tc)
+#else
+static int i40e_setup_tc(struct net_device *netdev, u8 tc)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       u8 enabled_tc = 0;
+       int ret = -EINVAL;
+       int i;
+
+       /* Check if DCB enabled to continue */
+       if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+               netdev_info(netdev, "DCB is not enabled for adapter\n");
+               goto exit;
+       }
+
+       /* Check if MFP enabled */
+       if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+               netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
+               goto exit;
+       }
+
+       /* Check whether tc count is within enabled limit */
+       if (tc > i40e_pf_get_num_tc(pf)) {
+               netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
+               goto exit;
+       }
+
+       /* Generate TC map for number of tc requested */
+       for (i = 0; i < tc; i++)
+               enabled_tc |= BIT_ULL(i);
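+       /* e.g. tc = 3 yields enabled_tc = 0x7, i.e. TC0, TC1 and TC2 */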
+
+       /* Requesting same TC configuration as already enabled */
+       if (enabled_tc == vsi->tc_config.enabled_tc)
+               return 0;
+
+       /* Quiesce VSI queues */
+       i40e_quiesce_vsi(vsi);
+
+       /* Configure VSI for enabled TCs */
+       ret = i40e_vsi_config_tc(vsi, enabled_tc);
+       if (ret) {
+               netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
+                           vsi->seid);
+               goto exit;
+       }
+
+       /* Unquiesce VSI */
+       i40e_unquiesce_vsi(vsi);
+
+exit:
+       return ret;
+}
+#endif /* HAVE_SETUP_TC */
+
+/**
+ * i40e_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog subtask is
+ * enabled, and the stack is notified that the interface is ready.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+int i40e_open(struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       int err;
+
+       /* disallow open during test or if eeprom is broken */
+       if (test_bit(__I40E_TESTING, &pf->state) ||
+           test_bit(__I40E_BAD_EEPROM, &pf->state))
+               return -EBUSY;
+
+       netif_carrier_off(netdev);
+
+       err = i40e_vsi_open(vsi);
+       if (err)
+               return err;
+       /* configure global TSO hardware offload settings */
+       wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
+                                                      TCP_FLAG_FIN) >> 16);
+       wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
+                                                      TCP_FLAG_FIN |
+                                                      TCP_FLAG_CWR) >> 16);
+       wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
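+       /* The GLLAN_TSOMSK_{F,M,L} values appear to select which TCP
+        * flags (PSH/FIN/CWR) the HW masks off when replicating the
+        * header across TSO segments - presumably for the first, middle
+        * and last segment respectively, going by the register names.
+        */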
+
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+#ifdef HAVE_VXLAN_CHECKS
+       vxlan_get_rx_port(netdev);
+#else
+#if IS_ENABLED(CONFIG_VXLAN)
+       vxlan_get_rx_port(netdev);
+#endif
+#endif /* HAVE_VXLAN_CHECKS */
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_open - set up resources for the VSI and bring it up
+ * @vsi: the VSI to open
+ *
+ * Finish initialization of the VSI.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+int i40e_vsi_open(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       char int_name[I40E_INT_NAME_STR_LEN];
+       int err;
+
+       /* allocate descriptors */
+       err = i40e_vsi_setup_tx_resources(vsi);
+       if (err)
+               goto err_setup_tx;
+       err = i40e_vsi_setup_rx_resources(vsi);
+       if (err)
+               goto err_setup_rx;
+
+       err = i40e_vsi_configure(vsi);
+       if (err)
+               goto err_setup_rx;
+
+       if (vsi->netdev) {
+               snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+                        dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+               err = i40e_vsi_request_irq(vsi, int_name);
+               if (err)
+                       goto err_setup_rx;
+
+               /* Notify the stack of the actual queue counts. */
+               netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
+
+               err = netif_set_real_num_rx_queues(vsi->netdev,
+                                                  vsi->num_queue_pairs);
+               if (err)
+                       goto err_set_queues;
+
+       } else if (vsi->type == I40E_VSI_FDIR) {
+               snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
+                        dev_driver_string(&pf->pdev->dev),
+                        dev_name(&pf->pdev->dev));
+               err = i40e_vsi_request_irq(vsi, int_name);
+
+       } else {
+               err = -EINVAL;
+               goto err_setup_rx;
+       }
+
+       err = i40e_up_complete(vsi);
+       if (err)
+               goto err_up_complete;
+
+       return 0;
+
+err_up_complete:
+       i40e_down(vsi);
+err_set_queues:
+       i40e_vsi_free_irq(vsi);
+err_setup_rx:
+       i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+       i40e_vsi_free_tx_resources(vsi);
+       if (vsi == pf->vsi[pf->lan_vsi])
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+
+       return err;
+}
+
+/**
+ * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
+ * @pf: Pointer to PF
+ *
+ * This function destroys the hlist where all the Flow Director
+ * filters were saved.
+ **/
+static void i40e_fdir_filter_exit(struct i40e_pf *pf)
+{
+       struct i40e_fdir_filter *filter;
+       struct hlist_node *node2;
+
+       hlist_for_each_entry_safe(filter, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               hlist_del(&filter->fdir_node);
+               kfree(filter);
+       }
+       pf->fdir_pf_active_filters = 0;
+}
+
+/**
+ * i40e_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the driver's control, but
+ * this netdev interface is disabled.
+ *
+ * Returns 0, this is not allowed to fail
+ **/
+#ifdef I40E_FCOE
+int i40e_close(struct net_device *netdev)
+#else
+static int i40e_close(struct net_device *netdev)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+
+       i40e_vsi_close(vsi);
+
+       return 0;
+}
+
+/**
+ * i40e_do_reset - Start a PF or Core Reset sequence
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ * The essential difference in resets is that the PF Reset
+ * doesn't clear the packet buffers, doesn't reset the PE
+ * firmware, and doesn't bother the other PFs on the chip.
+ **/
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+{
+       u32 val;
+
+       WARN_ON(in_interrupt());
+
+       if (i40e_check_asq_alive(&pf->hw))
+               i40e_vc_notify_reset(pf);
+
+       /* do the biggest reset indicated */
+       if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
+
+               /* Request a Global Reset
+                *
+                * This will start the chip's countdown to the actual full
+                * chip reset event, and a warning interrupt to be sent
+                * to all PFs, including the requestor.  Our handler
+                * for the warning interrupt will deal with the shutdown
+                * and recovery of the switch setup.
+                */
+               dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
+               val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+               val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
+               wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+
+       } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
+
+               /* Request a Core Reset
+                *
+                * Same as Global Reset, except does *not* include the MAC/PHY
+                */
+               dev_dbg(&pf->pdev->dev, "CoreR requested\n");
+               val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+               val |= I40E_GLGEN_RTRIG_CORER_MASK;
+               wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+               i40e_flush(&pf->hw);
+
+       } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
+
+               /* Request a PF Reset
+                *
+                * Resets only the PF-specific registers
+                *
+                * This goes directly to the tear-down and rebuild of
+                * the switch, since we need to do all the recovery as
+                * for the Core Reset.
+                */
+               dev_dbg(&pf->pdev->dev, "PFR requested\n");
+               i40e_handle_reset_warning(pf);
+
+       } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
+               int v;
+
+               /* Find the VSI(s) that requested a re-init */
+               dev_info(&pf->pdev->dev,
+                        "VSI reinit requested\n");
+               for (v = 0; v < pf->num_alloc_vsi; v++) {
+                       struct i40e_vsi *vsi = pf->vsi[v];
+
+                       if (vsi != NULL &&
+                           test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
+                               i40e_vsi_reinit_locked(pf->vsi[v]);
+                               clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
+                       }
+               }
+       } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
+               int v;
+
+               /* Find the VSI(s) that need to be brought down */
+               dev_info(&pf->pdev->dev, "VSI down requested\n");
+               for (v = 0; v < pf->num_alloc_vsi; v++) {
+                       struct i40e_vsi *vsi = pf->vsi[v];
+
+                       if (vsi != NULL &&
+                           test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
+                               set_bit(__I40E_DOWN, &vsi->state);
+                               i40e_down(vsi);
+                               clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
+                       }
+               }
+       } else {
+               dev_info(&pf->pdev->dev,
+                        "bad reset request 0x%08x\n", reset_flags);
+       }
+}
+
+/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+       rtnl_lock();
+       i40e_do_reset(pf, reset_flags);
+       rtnl_unlock();
+}
+
+#ifdef CONFIG_DCB
+/**
+ * i40e_dcb_need_reconfig - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ **/
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+                           struct i40e_dcbx_config *old_cfg,
+                           struct i40e_dcbx_config *new_cfg)
+{
+       bool need_reconfig = false;
+
+       /* Check if ETS configuration has changed */
+       if (memcmp(&new_cfg->etscfg,
+                  &old_cfg->etscfg,
+                  sizeof(new_cfg->etscfg))) {
+               /* If Priority Table has changed reconfig is needed */
+               if (memcmp(&new_cfg->etscfg.prioritytable,
+                          &old_cfg->etscfg.prioritytable,
+                          sizeof(new_cfg->etscfg.prioritytable))) {
+                       need_reconfig = true;
+                       dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+               }
+
+               if (memcmp(&new_cfg->etscfg.tcbwtable,
+                          &old_cfg->etscfg.tcbwtable,
+                          sizeof(new_cfg->etscfg.tcbwtable)))
+                       dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+               if (memcmp(&new_cfg->etscfg.tsatable,
+                          &old_cfg->etscfg.tsatable,
+                          sizeof(new_cfg->etscfg.tsatable)))
+                       dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+       }
+
+       /* Check if PFC configuration has changed */
+       if (memcmp(&new_cfg->pfc,
+                  &old_cfg->pfc,
+                  sizeof(new_cfg->pfc))) {
+               need_reconfig = true;
+               dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+       }
+
+       /* Check if APP Table has changed */
+       if (memcmp(&new_cfg->app,
+                  &old_cfg->app,
+                  sizeof(new_cfg->app))) {
+               need_reconfig = true;
+               dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+       }
+
+       dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+       return need_reconfig;
+}
+
+/**
+ * i40e_handle_lldp_event - Handle LLDP Change MIB event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static int i40e_handle_lldp_event(struct i40e_pf *pf,
+                                 struct i40e_arq_event_info *e)
+{
+       struct i40e_aqc_lldp_get_mib *mib =
+               (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_dcbx_config tmp_dcbx_cfg;
+       bool need_reconfig = false;
+       int ret = 0;
+       u8 type;
+
+       /* Not DCB capable or capability disabled */
+       if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+               return ret;
+
+       /* Ignore if event is not for Nearest Bridge */
+       type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
+               & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+       dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
+       if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
+               return ret;
+
+       /* Check MIB Type and return if event for Remote MIB update */
+       type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+       dev_dbg(&pf->pdev->dev,
+               "LLDP event mib type %s\n", type ? "remote" : "local");
+       if (type == I40E_AQ_LLDP_MIB_REMOTE) {
+               /* Update the remote cached instance and return */
+               ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+                               I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+                               &hw->remote_dcbx_config);
+               goto exit;
+       }
+
+       /* Store the old configuration */
+       tmp_dcbx_cfg = hw->local_dcbx_config;
+
+       /* Reset the old DCBx configuration data */
+       memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
+       /* Get updated DCBX data from firmware */
+       ret = i40e_get_dcb_config(&pf->hw);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               goto exit;
+       }
+
+       /* No change detected in DCBX configs */
+       if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
+                   sizeof(tmp_dcbx_cfg))) {
+               dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+               goto exit;
+       }
+
+       need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
+                                              &hw->local_dcbx_config);
+
+#ifdef HAVE_DCBNL_IEEE
+       i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
+#endif /* HAVE_DCBNL_IEEE */
+
+       if (!need_reconfig)
+               goto exit;
+
+       /* Enable DCB tagging only when more than one TC */
+       if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+               pf->flags |= I40E_FLAG_DCB_ENABLED;
+       else
+               pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
+       set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+       /* Reconfiguration needed; quiesce all VSIs */
+       i40e_pf_quiesce_all_vsi(pf);
+
+       /* Apply the configuration changes to VEB/VSIs */
+       i40e_dcb_reconfigure(pf);
+
+       ret = i40e_resume_port_tx(pf);
+
+       clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+       /* In case of error, there is no point in resuming the VSIs */
+       if (ret)
+               goto exit;
+
+       /* Wait for the PF's Tx queues to be disabled */
+       ret = i40e_pf_wait_txq_disabled(pf);
+       if (ret) {
+               /* Schedule PF reset to recover */
+               set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+               i40e_service_event_schedule(pf);
+       } else {
+               i40e_pf_unquiesce_all_vsi(pf);
+       }
+
+exit:
+       return ret;
+}
+
+#endif /* CONFIG_DCB */
+/**
+ * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ *
+ * Handler for LAN Queue Overflow Event generated by the firmware for PF
+ * and VF queues
+ **/
+static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
+                                          struct i40e_arq_event_info *e)
+{
+       struct i40e_aqc_lan_overflow *data =
+               (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+       u32 queue = le32_to_cpu(data->prtdcb_rupto);
+       u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_vf *vf;
+       u16 vf_id;
+
+       dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
+               queue, qtx_ctl);
+
+       /* Queue belongs to VF, find the VF and issue VF reset */
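+       /* The VFVM index field in qtx_ctl carries the absolute VF
+        * number; subtracting vf_base_id below converts it into this
+        * PF's zero-based VF array index.
+        */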
+       if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
+           >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
+               vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
+                        >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
+               vf_id -= hw->func_caps.vf_base_id;
+               vf = &pf->vf[vf_id];
+               i40e_vc_notify_vf_reset(vf);
+               /* Allow VF to process pending reset notification */
+               msleep(20);
+               i40e_reset_vf(vf, false);
+       }
+}
+
+/**
+ * i40e_service_event_complete - Finish up the service event
+ * @pf: board private structure
+ **/
+static void i40e_service_event_complete(struct i40e_pf *pf)
+{
+       BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
+
+       /* flush memory to make sure state is correct before next watchdog */
+       smp_mb__before_atomic();
+       clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+}
+
+/**
+ * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
+ * @pf: board private structure
+ **/
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+{
+       u32 val, fcnt_prog;
+
+       val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+       fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
+       return fcnt_prog;
+}
+
+/**
+ * i40e_get_current_fd_count - Get total FD filters programmed for this PF
+ * @pf: board private structure
+ **/
+u32 i40e_get_current_fd_count(struct i40e_pf *pf)
+{
+       u32 val, fcnt_prog;
+
+       val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
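+       /* The guaranteed-filter count occupies the low bits of FDSTAT,
+        * so only the best-effort count needs to be shifted down.
+        */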
+       fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
+                   ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+                     I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+       return fcnt_prog;
+}
+
+/**
+ * i40e_get_global_fd_count - Get total FD filters programmed on device
+ * @pf: board private structure
+ **/
+u32 i40e_get_global_fd_count(struct i40e_pf *pf)
+{
+       u32 val, fcnt_prog;
+
+       val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
+       fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
+                   ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
+                     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+       return fcnt_prog;
+}
+
+/**
+ * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
+ * @pf: board private structure
+ **/
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
+{
+       struct i40e_fdir_filter *filter;
+       u32 fcnt_prog, fcnt_avail;
+       struct hlist_node *node;
+
+       if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+               return;
+
+       /* Check if FD SB or ATR was auto disabled and if there is enough room
+        * to re-enable
+        */
+       fcnt_prog = i40e_get_global_fd_count(pf);
+       fcnt_avail = pf->fdir_pf_filter_count;
+       if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
+           (pf->fd_add_err == 0) ||
+           (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
+               if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+                       pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+               }
+       }
+       /* Wait for some more space to be available to turn on ATR */
+       if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
+               if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+               }
+       }
+
+       /* if hw had a problem adding a filter, delete it */
+       if (pf->fd_inv > 0) {
+               hlist_for_each_entry_safe(filter, node,
+                                         &pf->fdir_filter_list, fdir_node) {
+                       if (filter->fd_id == pf->fd_inv) {
+                               hlist_del(&filter->fdir_node);
+                               kfree(filter);
+                               pf->fdir_pf_active_filters--;
+                       }
+               }
+       }
+}
+
+#define I40E_MIN_FD_FLUSH_INTERVAL 10
+#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
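+/* Both intervals above are in seconds; they are multiplied by HZ when
+ * compared against the jiffies-based fd_flush_timestamp below.
+ */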
+/**
+ * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
+ * @pf: board private structure
+ **/
+static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
+{
+       unsigned long min_flush_time;
+       int flush_wait_retry = 50;
+       bool disable_atr = false;
+       int fd_room;
+       int reg;
+
+       if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
+               return;
+
+       if (!time_after(jiffies, pf->fd_flush_timestamp +
+                                (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
+               return;
+
+       /* If the flush is happening too quickly and we have mostly SB rules we
+        * should not re-enable ATR for some time.
+        */
+       min_flush_time = pf->fd_flush_timestamp +
+                        (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+       fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+
+       if (!(time_after(jiffies, min_flush_time)) &&
+           (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                       dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+               disable_atr = true;
+       }
+
+       pf->fd_flush_timestamp = jiffies;
+       pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+       /* flush all filters */
+       wr32(&pf->hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+       i40e_flush(&pf->hw);
+       pf->fd_flush_cnt++;
+       pf->fd_add_err = 0;
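+       /* Poll for completion: HW clears CLEARFDTABLE once the flush is
+        * done, so the worst case wait is roughly 50 * 6 ms, i.e. ~300 ms.
+        */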
+       do {
+               /* Check FD flush status every 5-6msec */
+               usleep_range(5000, 6000);
+               reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
+               if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+                       break;
+       } while (flush_wait_retry--);
+
+       if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
+               dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
+       } else {
+               /* replay sideband filters */
+               i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+               if (!disable_atr)
+                       pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+               clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                       dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+       }
+}
+
+/**
+ * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
+ * @pf: board private structure
+ **/
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
+{
+       return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
+}
+
+/* We can see up to 256 filter programming descriptors in transit before we
+ * see the first filter miss error on Rx queue 0, if the filters are being
+ * applied really fast. Accumulating enough error messages before reacting
+ * will make sure we don't trigger a flush too often.
+ */
+#define I40E_MAX_FD_PROGRAM_ERROR 256
+/**
+ * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
+ * @pf: board private structure
+ **/
+static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
+{
+       /* if interface is down do nothing */
+       if (test_bit(__I40E_DOWN, &pf->state))
+               return;
+
+       if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
+               return;
+
+       if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+               i40e_fdir_flush_and_replay(pf);
+
+       i40e_fdir_check_and_reenable(pf);
+}
+
+/**
+ * i40e_vsi_link_event - notify VSI of a link event
+ * @vsi: vsi to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
+{
+       if (!vsi || (test_bit(__I40E_DOWN, &vsi->state)))
+               return;
+
+       switch (vsi->type) {
+       case I40E_VSI_MAIN:
+#ifdef I40E_FCOE
+       case I40E_VSI_FCOE:
+#endif
+               if (!vsi->netdev || !vsi->netdev_registered)
+                       break;
+
+               if (link_up) {
+                       netif_carrier_on(vsi->netdev);
+                       netif_tx_wake_all_queues(vsi->netdev);
+               } else {
+                       netif_carrier_off(vsi->netdev);
+                       netif_tx_stop_all_queues(vsi->netdev);
+               }
+               break;
+
+       case I40E_VSI_SRIOV:
+       case I40E_VSI_VMDQ2:
+       case I40E_VSI_CTRL:
+       case I40E_VSI_MIRROR:
+       default:
+               /* there is no notification for other VSIs */
+               break;
+       }
+}
+
+/**
+ * i40e_veb_link_event - notify elements on the veb of a link event
+ * @veb: veb to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
+{
+       struct i40e_pf *pf;
+       int i;
+
+       if (!veb || !veb->pf)
+               return;
+       pf = veb->pf;
+
+       /* depth first... */
+       for (i = 0; i < I40E_MAX_VEB; i++)
+               if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
+                       i40e_veb_link_event(pf->veb[i], link_up);
+
+       /* ... now the local VSIs */
+       for (i = 0; i < pf->num_alloc_vsi; i++)
+               if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
+                       i40e_vsi_link_event(pf->vsi[i], link_up);
+}
+
+/**
+ * i40e_link_event - Update netif_carrier status
+ * @pf: board private structure
+ **/
+static void i40e_link_event(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       u8 new_link_speed, old_link_speed;
+       i40e_status status;
+       bool new_link, old_link;
+
+       /* set this to force the get_link_status call to refresh state */
+       pf->hw.phy.get_link_info = true;
+
+       old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+
+       status = i40e_get_link_status(&pf->hw, &new_link);
+       if (status != I40E_SUCCESS) {
+               dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
+                       status);
+               return;
+       }
+
+       old_link_speed = pf->hw.phy.link_info_old.link_speed;
+       new_link_speed = pf->hw.phy.link_info.link_speed;
+
+       if (new_link == old_link &&
+           new_link_speed == old_link_speed &&
+           (test_bit(__I40E_DOWN, &vsi->state) ||
+            new_link == netif_carrier_ok(vsi->netdev)))
+               return;
+
+       if (!test_bit(__I40E_DOWN, &vsi->state))
+               i40e_print_link_message(vsi, new_link);
+
+       /* Notify the base of the switch tree connected to
+        * the link.  Floating VEBs are not notified.
+        */
+       if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+               i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+       else
+               i40e_vsi_link_event(vsi, new_link);
+
+       if (pf->vf)
+               i40e_vc_notify_link_state(pf);
+#ifdef HAVE_PTP_1588_CLOCK
+
+       if (pf->flags & I40E_FLAG_PTP)
+               i40e_ptp_set_increment(pf);
+#endif /* HAVE_PTP_1588_CLOCK */
+}
+
+/**
+ * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
+ * @pf: board private structure
+ *
+ * Set the per-queue flags to request a check for stuck queues in the irq
+ * clean functions, then force interrupts to be sure the irq clean is called.
+ **/
+static void i40e_check_hang_subtask(struct i40e_pf *pf)
+{
+       int i, v;
+
+       /* If we're down or resetting, just bail */
+       if (test_bit(__I40E_DOWN, &pf->state) ||
+           test_bit(__I40E_CONFIG_BUSY, &pf->state))
+               return;
+
+       /* for each VSI/netdev
+        *     for each Tx queue
+        *         set the check flag
+        *     for each q_vector
+        *         force an interrupt
+        */
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+               struct i40e_vsi *vsi = pf->vsi[v];
+               int armed = 0;
+
+               if (!pf->vsi[v] ||
+                   test_bit(__I40E_DOWN, &vsi->state) ||
+                   (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
+                       continue;
+
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       set_check_for_tx_hang(vsi->tx_rings[i]);
+                       if (test_bit(__I40E_HANG_CHECK_ARMED,
+                                    &vsi->tx_rings[i]->state))
+                               armed++;
+               }
+
+               if (armed) {
+                       if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+                               wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
+                                    (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+                                     I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+                                     I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
+                                     I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
+                                     I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
+                       } else {
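+                               /* Vector 0 (DYN_CTL0) is the misc
+                                * interrupt, so per-queue vectors
+                                * presumably start at DYN_CTLN index
+                                * base_vector - 1.
+                                */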
+                               u16 vec = vsi->base_vector - 1;
+                               u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                                     I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                                     I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
+                                     I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
+                                     I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
+                               for (i = 0; i < vsi->num_q_vectors; i++, vec++)
+                                       wr32(&vsi->back->hw,
+                                            I40E_PFINT_DYN_CTLN(vec), val);
+                       }
+                       i40e_flush(&vsi->back->hw);
+               }
+       }
+}
+
+/**
+ * i40e_watchdog_subtask - periodic checks not using event driven response
+ * @pf: board private structure
+ **/
+static void i40e_watchdog_subtask(struct i40e_pf *pf)
+{
+       int i;
+
+       /* if interface is down do nothing */
+       if (test_bit(__I40E_DOWN, &pf->state) ||
+           test_bit(__I40E_CONFIG_BUSY, &pf->state))
+               return;
+
+       /* make sure we don't do these things too often */
+       if (time_before(jiffies,
+                       (pf->service_timer_previous + pf->service_timer_period)))
+               return;
+       pf->service_timer_previous = jiffies;
+
+       i40e_check_hang_subtask(pf);
+       if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
+               i40e_link_event(pf);
+
+       /* Update the stats for active netdevs so the network stack
+        * can look at updated numbers whenever it cares to
+        */
+       for (i = 0; i < pf->num_alloc_vsi; i++)
+               if (pf->vsi[i] && pf->vsi[i]->netdev)
+                       i40e_update_stats(pf->vsi[i]);
+
+       if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+               /* Update the stats for the active switching components */
+               for (i = 0; i < I40E_MAX_VEB; i++)
+                       if (pf->veb[i])
+                               i40e_update_veb_stats(pf->veb[i]);
+       }
+#ifdef HAVE_PTP_1588_CLOCK
+
+       i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
+#endif /* HAVE_PTP_1588_CLOCK */
+}
+
+/**
+ * i40e_reset_subtask - Set up for resetting the device and driver
+ * @pf: board private structure
+ **/
+static void i40e_reset_subtask(struct i40e_pf *pf)
+{
+       u32 reset_flags = 0;
+
+       rtnl_lock();
+       if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
+               reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
+               clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
+       }
+       if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
+               reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
+               clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+       }
+       if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
+               reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
+               clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+       }
+       if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
+               reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
+               clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+       }
+       if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
+               reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
+               clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
+       }
+
+       /* If there's a recovery already waiting, it takes
+        * precedence over starting a new reset sequence.
+        */
+       if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
+               i40e_handle_reset_warning(pf);
+               goto unlock;
+       }
+
+       /* If we're already down or resetting, just bail */
+       if (reset_flags &&
+           !test_bit(__I40E_DOWN, &pf->state) &&
+           !test_bit(__I40E_CONFIG_BUSY, &pf->state))
+               i40e_do_reset(pf, reset_flags);
+
+unlock:
+       rtnl_unlock();
+}
+
+/**
+ * i40e_handle_link_event - Handle link event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static void i40e_handle_link_event(struct i40e_pf *pf,
+                                  struct i40e_arq_event_info *e)
+{
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_aqc_get_link_status *status =
+               (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+
+       /* save off old link status information */
+       hw->phy.link_info_old = hw->phy.link_info;
+
+       /* Do a new status request to re-enable LSE reporting
+        * and load new status information into the hw struct
+        * This completely ignores any state information
+        * in the ARQ event info, instead choosing to always
+        * issue the AQ update link status command.
+        */
+       i40e_link_event(pf);
+
+       /* check for unqualified module, if link is down */
+       if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+           (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+           (!(status->link_info & I40E_AQ_LINK_UP)))
+               dev_err(&pf->pdev->dev,
+                       "The driver failed to link because an unqualified module was detected.\n");
+}
+
+/**
+ * i40e_clean_adminq_subtask - Clean the AdminQ rings
+ * @pf: board private structure
+ **/
+static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
+{
+       struct i40e_arq_event_info event;
+       struct i40e_hw *hw = &pf->hw;
+       u16 pending, i = 0;
+       i40e_status ret;
+       u16 opcode;
+       u32 oldval;
+       u32 val;
+
+       /* Do not run clean AQ when PF reset fails */
+       if (test_bit(__I40E_RESET_FAILED, &pf->state))
+               return;
+
+       /* check for error indications */
+       val = rd32(&pf->hw, pf->hw.aq.arq.len);
+       oldval = val;
+       if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
+               dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+               val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
+       }
+       if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
+               dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+               val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+       }
+       if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
+               dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+               val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
+       }
+       if (oldval != val)
+               wr32(&pf->hw, pf->hw.aq.arq.len, val);
+
+       val = rd32(&pf->hw, pf->hw.aq.asq.len);
+       oldval = val;
+       if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
+               dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+               val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
+       }
+       if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
+               dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+               val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
+       }
+       if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
+               dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+               val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
+       }
+       if (oldval != val)
+               wr32(&pf->hw, pf->hw.aq.asq.len, val);
+
+       event.buf_len = I40E_MAX_AQ_BUF_SIZE;
+       event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+       if (!event.msg_buf)
+               return;
+
+       do {
+               ret = i40e_clean_arq_element(hw, &event, &pending);
+               if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+                       break;
+               else if (ret) {
+                       dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
+                       break;
+               }
+
+               opcode = LE16_TO_CPU(event.desc.opcode);
+               switch (opcode) {
+
+               case i40e_aqc_opc_get_link_status:
+                       i40e_handle_link_event(pf, &event);
+                       break;
+               case i40e_aqc_opc_send_msg_to_pf:
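+                       /* For VF messages the descriptor fields are
+                        * reused: retval carries the VF id and the
+                        * cookie words the VF opcode/retval, as the
+                        * i40e_vc_process_vf_msg() arguments suggest.
+                        */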
+                       ret = i40e_vc_process_vf_msg(pf,
+                                       le16_to_cpu(event.desc.retval),
+                                       le32_to_cpu(event.desc.cookie_high),
+                                       le32_to_cpu(event.desc.cookie_low),
+                                       event.msg_buf,
+                                       event.msg_len);
+                       break;
+               case i40e_aqc_opc_lldp_update_mib:
+                       dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+#ifdef CONFIG_DCB
+                       rtnl_lock();
+                       ret = i40e_handle_lldp_event(pf, &event);
+                       rtnl_unlock();
+#endif /* CONFIG_DCB */
+                       break;
+               case i40e_aqc_opc_event_lan_overflow:
+                       dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+                       i40e_handle_lan_overflow_event(pf, &event);
+                       break;
+               case i40e_aqc_opc_send_msg_to_peer:
+                       dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
+                       break;
+               case i40e_aqc_opc_nvm_erase:
+               case i40e_aqc_opc_nvm_update:
+                       i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
+                       break;
+               default:
+                       dev_info(&pf->pdev->dev,
+                                "ARQ Error: Unknown event 0x%04x received\n",
+                                opcode);
+                       break;
+               }
+       } while (pending && (i++ < pf->adminq_work_limit));
+
+       clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+       /* re-enable Admin queue interrupt cause */
+       val = rd32(hw, I40E_PFINT_ICR0_ENA);
+       val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+       wr32(hw, I40E_PFINT_ICR0_ENA, val);
+       i40e_flush(hw);
+
+       kfree(event.msg_buf);
+}
+
+/**
+ * i40e_verify_eeprom - make sure eeprom is good to use
+ * @pf: board private structure
+ **/
+static void i40e_verify_eeprom(struct i40e_pf *pf)
+{
+       int err;
+
+       err = i40e_diag_eeprom_test(&pf->hw);
+       if (err) {
+               /* retry in case of garbage read */
+               err = i40e_diag_eeprom_test(&pf->hw);
+               if (err) {
+                       dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
+                                err);
+                       set_bit(__I40E_BAD_EEPROM, &pf->state);
+               }
+       }
+
+       if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+               dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
+               clear_bit(__I40E_BAD_EEPROM, &pf->state);
+       }
+}
+
+/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * enable switch loopback or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       struct i40e_vsi_context ctxt;
+       int ret;
+
+       ctxt.seid = pf->main_vsi_seid;
+       ctxt.pf_num = pf->hw.pf_id;
+       ctxt.vf_num = 0;
+       ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "couldn't get PF vsi config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               return;
+       }
+       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+       ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
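+       /* ALLOW_LB lets traffic hairpin back through the internal
+        * switch - e.g. for VM-to-VM traffic on the same VEB (an
+        * assumption based on the flag name and the bridge-mode
+        * handling in i40e_config_bridge_mode() below).
+        */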
+
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "update vsi switch failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+       }
+}
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * disable switch loopback or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       struct i40e_vsi_context ctxt;
+       int ret;
+
+       ctxt.seid = pf->main_vsi_seid;
+       ctxt.pf_num = pf->hw.pf_id;
+       ctxt.vf_num = 0;
+       ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "couldn't get PF vsi config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               return;
+       }
+       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+       ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "update vsi switch failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+       }
+}
+
+/**
+ * i40e_config_bridge_mode - Configure the HW bridge mode
+ * @veb: pointer to the bridge instance
+ *
+ * Configure the loop back mode for the LAN VSI that is downlink to the
+ * specified HW bridge instance. It is expected this function is called
+ * when a new HW bridge is instantiated.
+ **/
+static void i40e_config_bridge_mode(struct i40e_veb *veb)
+{
+       struct i40e_pf *pf = veb->pf;
+
+#ifdef HAVE_BRIDGE_ATTRIBS
+       if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+               dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+                        veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+       if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+               i40e_disable_pf_switch_lb(pf);
+       else
+               i40e_enable_pf_switch_lb(pf);
+#else
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               i40e_enable_pf_switch_lb(pf);
+       else
+               i40e_disable_pf_switch_lb(pf);
+#endif
+}
+
+/**
+ * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * @veb: pointer to the VEB instance
+ *
+ * This is a recursive function that first builds the attached VSIs then
+ * recurses to build the next layer of VEBs.  We track the connections
+ * through our own index numbers because the seids from the HW could
+ * change across the reset.
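+ *
+ * (Ordering matters: the owner VSI must be re-added before the VEB can
+ * be created, and child VSIs and VEBs can only be attached once this
+ * VEB has its new seid.)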
+ **/
+static int i40e_reconstitute_veb(struct i40e_veb *veb)
+{
+       struct i40e_vsi *ctl_vsi = NULL;
+       struct i40e_pf *pf = veb->pf;
+       int v, veb_idx;
+       int ret;
+
+       /* build VSI that owns this VEB, temporarily attached to base VEB */
+       for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
+               if (pf->vsi[v] &&
+                   pf->vsi[v]->veb_idx == veb->idx &&
+                   pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
+                       ctl_vsi = pf->vsi[v];
+                       break;
+               }
+       }
+       if (!ctl_vsi) {
+               dev_info(&pf->pdev->dev,
+                        "missing owner VSI for veb_idx %d\n", veb->idx);
+               ret = -ENOENT;
+               goto end_reconstitute;
+       }
+       if (ctl_vsi != pf->vsi[pf->lan_vsi])
+               ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+       ret = i40e_add_vsi(ctl_vsi);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "rebuild of veb_idx %d owner VSI failed: %d\n",
+                        veb->idx, ret);
+               goto end_reconstitute;
+       }
+       i40e_vsi_reset_stats(ctl_vsi);
+
+       /* create the VEB in the switch and move the VSI onto the VEB */
+       ret = i40e_add_veb(veb, ctl_vsi);
+       if (ret)
+               goto end_reconstitute;
+
+#ifdef HAVE_BRIDGE_ATTRIBS
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               veb->bridge_mode = BRIDGE_MODE_VEB;
+       else
+               veb->bridge_mode = BRIDGE_MODE_VEPA;
+#endif
+       i40e_config_bridge_mode(veb);
+
+       /* create the remaining VSIs attached to this VEB */
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+               if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+                       continue;
+
+               if (pf->vsi[v]->veb_idx == veb->idx) {
+                       struct i40e_vsi *vsi = pf->vsi[v];
+
+                       vsi->uplink_seid = veb->seid;
+                       ret = i40e_add_vsi(vsi);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "rebuild of vsi_idx %d failed: %d\n",
+                                        v, ret);
+                               goto end_reconstitute;
+                       }
+                       i40e_vsi_reset_stats(vsi);
+               }
+       }
+
+       /* create any VEBs attached to this VEB - RECURSION */
+       for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+               if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
+                       pf->veb[veb_idx]->uplink_seid = veb->seid;
+                       ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
+                       if (ret)
+                               break;
+               }
+       }
+
+end_reconstitute:
+       return ret;
+}
+
+/**
+ * i40e_get_capabilities - get info about the HW
+ * @pf: the PF struct
+ **/
+static int i40e_get_capabilities(struct i40e_pf *pf)
+{
+       struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+       u16 data_size;
+       int buf_len;
+       int err;
+
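+       /* start with room for 40 capability records; if that is too
+        * small the firmware answers with ENOMEM and the required size
+        * in data_size, and we retry with the larger buffer
+        */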
+       buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+       do {
+               cap_buf = kzalloc(buf_len, GFP_KERNEL);
+               if (!cap_buf)
+                       return -ENOMEM;
+
+               /* this loads the data into the hw struct for us */
+               err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
+                                           &data_size,
+                                           i40e_aqc_opc_list_func_capabilities,
+                                           NULL);
+               /* data loaded, buffer no longer needed */
+               kfree(cap_buf);
+
+               if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+                       /* retry with a larger buffer */
+                       buf_len = data_size;
+               } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+                       dev_info(&pf->pdev->dev,
+                                "capability discovery failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, err),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
+                       return -ENODEV;
+               }
+       } while (err);
+
+       if (pf->hw.debug_mask & I40E_DEBUG_USER)
+               dev_info(&pf->pdev->dev,
+                        "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
+                        pf->hw.pf_id, pf->hw.func_caps.num_vfs,
+                        pf->hw.func_caps.num_msix_vectors,
+                        pf->hw.func_caps.num_msix_vectors_vf,
+                        pf->hw.func_caps.fd_filters_guaranteed,
+                        pf->hw.func_caps.fd_filters_best_effort,
+                        pf->hw.func_caps.num_tx_qp,
+                        pf->hw.func_caps.num_vsis);
+
+       return 0;
+}
+
+static int i40e_vsi_clear(struct i40e_vsi *vsi);
+
+/**
+ * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
+ * @pf: board private structure
+ **/
+static void i40e_fdir_sb_setup(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi;
+       int i;
+
+       if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+               return;
+
+       /* find existing VSI and see if it needs configuring */
+       vsi = NULL;
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+                       vsi = pf->vsi[i];
+                       break;
+               }
+       }
+
+       /* create a new VSI if none exists */
+       if (!vsi) {
+               vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
+                                    pf->vsi[pf->lan_vsi]->seid, 0);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
+                       pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                       return;
+               }
+       }
+
+       i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
+}
+
+/**
+ * i40e_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_teardown(struct i40e_pf *pf)
+{
+       int i;
+
+       i40e_fdir_filter_exit(pf);
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+                       i40e_vsi_release(pf->vsi[i]);
+                       break;
+               }
+       }
+}
+
+/**
+ * i40e_prep_for_reset - prep for the core to reset
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for PF Reset.
+ **/
+static void i40e_prep_for_reset(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       i40e_status ret = 0;
+       u32 v;
+
+       clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+       if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+               return;
+
+       dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+
+       /* quiesce the VSIs and their queues that are not already DOWN */
+       i40e_pf_quiesce_all_vsi(pf);
+
+       for (v = 0; v < pf->num_alloc_vsi; v++) {
+               if (pf->vsi[v])
+                       pf->vsi[v]->seid = 0;
+       }
+
+       i40e_shutdown_adminq(&pf->hw);
+
+       /* call shutdown HMC */
+       if (hw->hmc.hmc_obj) {
+               ret = i40e_shutdown_lan_hmc(hw);
+               if (ret)
+                       dev_warn(&pf->pdev->dev,
+                                "shutdown_lan_hmc failed: %d\n", ret);
+       }
+}
+
+/**
+ * i40e_send_version - update firmware with driver version
+ * @pf: PF struct
+ **/
+static void i40e_send_version(struct i40e_pf *pf)
+{
+       struct i40e_driver_version dv;
+
+       dv.major_version = DRV_VERSION_MAJOR;
+       dv.minor_version = DRV_VERSION_MINOR;
+       dv.build_version = DRV_VERSION_BUILD;
+       dv.subbuild_version = 0;
+       strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+       i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u8 set_fc_aq_fail = 0;
+       i40e_status ret;
+       u32 v;
+
+       /* Now we wait for GRST to settle out.
+        * We don't have to delete the VEBs or VSIs from the hw switch
+        * because the reset will make them disappear.
+        */
+       ret = i40e_pf_reset(hw);
+       if (ret) {
+               dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+               set_bit(__I40E_RESET_FAILED, &pf->state);
+               goto clear_recovery;
+       }
+       pf->pfr_count++;
+
+       if (test_bit(__I40E_DOWN, &pf->state))
+               goto clear_recovery;
+       dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
+
+       /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+       ret = i40e_init_adminq(&pf->hw);
+       if (ret) {
+               dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               goto clear_recovery;
+       }
+
+       /* re-verify the eeprom if we just had an EMP reset */
+       if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
+               i40e_verify_eeprom(pf);
+
+       i40e_clear_pxe_mode(hw);
+       ret = i40e_get_capabilities(pf);
+       if (ret)
+               goto end_core_reset;
+
+       ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+                               hw->func_caps.num_rx_qp,
+                               pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+       if (ret) {
+               dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
+               goto end_core_reset;
+       }
+       ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+       if (ret) {
+               dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
+               goto end_core_reset;
+       }
+
+#ifdef CONFIG_DCB
+       ret = i40e_init_pf_dcb(pf);
+       if (ret) {
+               dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
+               pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+               /* Continue without DCB enabled */
+       }
+
+#endif /* CONFIG_DCB */
+#ifdef I40E_FCOE
+       i40e_init_pf_fcoe(pf);
+
+#endif
+       /* do basic switch setup */
+       ret = i40e_setup_pf_switch(pf, reinit);
+       if (ret)
+               goto end_core_reset;
+
+       /* driver is only interested in link up/down and module qualification
+        * reports from firmware
+        */
+       ret = i40e_aq_set_phy_int_mask(&pf->hw,
+                                      I40E_AQ_EVENT_LINK_UPDOWN |
+                                      I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+       if (ret)
+               dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+       /* make sure our flow control settings are restored */
+       ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+       if (ret)
+               dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+       /* Rebuild the VSIs and VEBs that existed before reset.
+        * They are still in our local switch element arrays, so only
+        * need to rebuild the switch model in the HW.
+        *
+        * If there were VEBs but the reconstitution failed, we'll try
+        * to recover minimal use by getting the basic PF VSI working.
+        */
+       if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
+               dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
+               /* find the one VEB connected to the MAC, and find orphans */
+               for (v = 0; v < I40E_MAX_VEB; v++) {
+                       if (!pf->veb[v])
+                               continue;
+
+                       if (pf->veb[v]->uplink_seid == pf->mac_seid ||
+                           pf->veb[v]->uplink_seid == 0) {
+                               ret = i40e_reconstitute_veb(pf->veb[v]);
+
+                               if (!ret)
+                                       continue;
+
+                               /* If Main VEB failed, we're in deep doodoo,
+                                * so give up rebuilding the switch and set up
+                                * for minimal rebuild of PF VSI.
+                                * If orphan failed, we'll report the error
+                                * but try to keep going.
+                                */
+                               if (pf->veb[v]->uplink_seid == pf->mac_seid) {
+                                       dev_info(&pf->pdev->dev,
+                                                "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+                                                ret);
+                                       pf->vsi[pf->lan_vsi]->uplink_seid
+                                                               = pf->mac_seid;
+                                       break;
+                               } else if (pf->veb[v]->uplink_seid == 0) {
+                                       dev_info(&pf->pdev->dev,
+                                                "rebuild of orphan VEB failed: %d\n",
+                                                ret);
+                               }
+                       }
+               }
+       }
+
+       if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
+               dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
+               /* no VEB, so rebuild only the Main VSI */
+               ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "rebuild of Main VSI failed: %d\n", ret);
+                       goto end_core_reset;
+               }
+       }
+
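+       /* on firmware older than 4.33, pause briefly and explicitly
+        * restart link auto-negotiation to bring the link back up
+        * after the reset
+        */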
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4)) {
+               msleep(75);
+               ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+               if (ret)
+                       dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
+       }
+       /* reinit the misc interrupt */
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+               ret = i40e_setup_misc_vector(pf);
+
+       /* Add a filter to drop all Flow control frames from any VSI from being
+        * transmitted. By doing so we stop a malicious VF from sending out
+        * PAUSE or PFC frames and potentially controlling traffic for other
+        * PF/VF VSIs.
+        * The FW can still send Flow control frames if enabled.
+        */
+       i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+                                                       pf->main_vsi_seid);
+
+       /* restart the VSIs that were rebuilt and running before the reset */
+       i40e_pf_unquiesce_all_vsi(pf);
+
+       if (pf->num_alloc_vfs) {
+               for (v = 0; v < pf->num_alloc_vfs; v++)
+                       i40e_reset_vf(&pf->vf[v], true);
+       }
+
+       /* tell the firmware that we're starting */
+       i40e_send_version(pf);
+
+end_core_reset:
+       clear_bit(__I40E_RESET_FAILED, &pf->state);
+clear_recovery:
+       clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+}
+
+/**
+ * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+       i40e_prep_for_reset(pf);
+       i40e_reset_and_rebuild(pf, false);
+}
+
+/**
+ * i40e_handle_mdd_event
+ * @pf: pointer to the PF structure
+ *
+ * Called from the MDD irq handler to identify possibly malicious VFs
+ **/
+static void i40e_handle_mdd_event(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       bool mdd_detected = false;
+       bool pf_mdd_detected = false;
+       struct i40e_vf *vf;
+       u32 reg;
+       int i;
+
+       if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
+               return;
+
+       /* find what triggered the MDD event */
+       reg = rd32(hw, I40E_GL_MDET_TX);
+       if (reg & I40E_GL_MDET_TX_VALID_MASK) {
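+               /* decode the event; the queue number reported in the
+                * register is adjusted by the PF's base_queue to yield
+                * a PF-relative index
+                */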
+               u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+                               I40E_GL_MDET_TX_PF_NUM_SHIFT;
+               u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+                               I40E_GL_MDET_TX_VF_NUM_SHIFT;
+               u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+                               I40E_GL_MDET_TX_EVENT_SHIFT;
+               u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+                               I40E_GL_MDET_TX_QUEUE_SHIFT) -
+                            pf->hw.func_caps.base_queue;
+               if (netif_msg_tx_err(pf))
+                       dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
+                                event, queue, pf_num, vf_num);
+               wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+               mdd_detected = true;
+       }
+       reg = rd32(hw, I40E_GL_MDET_RX);
+       if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+               u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+                               I40E_GL_MDET_RX_FUNCTION_SHIFT;
+               u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+                               I40E_GL_MDET_RX_EVENT_SHIFT;
+               u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+                               I40E_GL_MDET_RX_QUEUE_SHIFT) -
+                            pf->hw.func_caps.base_queue;
+               if (netif_msg_rx_err(pf))
+                       dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
+                                event, queue, func);
+               wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+               mdd_detected = true;
+       }
+
+       if (mdd_detected) {
+               reg = rd32(hw, I40E_PF_MDET_TX);
+               if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+                       wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
+                       dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+                       pf_mdd_detected = true;
+               }
+               reg = rd32(hw, I40E_PF_MDET_RX);
+               if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+                       wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
+                       dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
+                       pf_mdd_detected = true;
+               }
+               /* Queue belongs to the PF, initiate a reset */
+               if (pf_mdd_detected) {
+                       set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+                       i40e_service_event_schedule(pf);
+               }
+       }
+
+       /* see if one of the VFs needs its hand slapped */
+       for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+               vf = &(pf->vf[i]);
+               reg = rd32(hw, I40E_VP_MDET_TX(i));
+               if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+                       wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
+                       vf->num_mdd_events++;
+                       dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", i);
+               }
+
+               reg = rd32(hw, I40E_VP_MDET_RX(i));
+               if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+                       wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
+                       vf->num_mdd_events++;
+                       dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", i);
+               }
+
+               if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
+                       dev_info(&pf->pdev->dev,
+                                "Too many MDD events on VF %d, disabled\n", i);
+                       dev_info(&pf->pdev->dev,
+                                "Use PF Control I/F to re-enable the VF\n");
+                       set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+               }
+       }
+
+       /* re-enable mdd interrupt cause */
+       clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+       reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+       reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+       wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+       i40e_flush(hw);
+}
+
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+/**
+ * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       i40e_status ret;
+       __be16 port;
+       int i;
+
+       if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+               return;
+
+       pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+
+       for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+               if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
+                       pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
+                       port = pf->vxlan_ports[i];
+                       if (port)
+                               ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
+                                                    I40E_AQC_TUNNEL_TYPE_VXLAN,
+                                                    NULL, NULL);
+                       else
+                               ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
+
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
+                                        port ? "add" : "delete",
+                                        ntohs(port), i,
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
+                               pf->vxlan_ports[i] = 0;
+                       } else {
+                               dev_info(&pf->pdev->dev,
+                                        "%s vxlan port %d, index %d success\n",
+                                        port ? "add" : "delete",
+                                        ntohs(port), i);
+                       }
+               }
+       }
+}
+
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
+/**
+ * i40e_detect_recover_hung_queue - Function to detect and recover a hung queue
+ * @q_idx: TX queue number
+ * @vsi: Pointer to VSI struct
+ *
+ * This function checks the specified queue of the given VSI for a hung
+ * condition and sets a 'hung' bit, since detection is a two-step process.
+ * If napi_poll runs before the next service-task run, it resets the 'hung'
+ * bit for the respective q_vector. If not, the hung condition remains
+ * unchanged, and on the subsequent run this function issues a SW interrupt
+ * to recover from it.
+ **/
+static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
+{
+       struct i40e_ring *tx_ring = NULL;
+       struct i40e_pf  *pf;
+       u32 head, val, tx_pending;
+       int i;
+
+       pf = vsi->back;
+
+       /* now that we have an index, find the tx_ring struct */
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+                       if (q_idx == vsi->tx_rings[i]->queue_index) {
+                               tx_ring = vsi->tx_rings[i];
+                               break;
+                       }
+               }
+       }
+
+       if (!tx_ring)
+               return;
+
+       /* Read interrupt register */
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+               val = rd32(&pf->hw,
+                    I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+                                        tx_ring->vsi->base_vector - 1));
+       else
+               val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+       head = i40e_get_head(tx_ring);
+
+       tx_pending = i40e_get_tx_pending(tx_ring);
+
+       /* Interrupts are disabled and Tx work is pending, so trigger
+        * the SW interrupt now rather than wait. Worst case there will
+        * be one extra interrupt that finds nothing to do because the
+        * queues were already cleaned.
+        */
+       if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+               i40e_force_wb(vsi, tx_ring->q_vector);
+}
+
+/**
+ * i40e_detect_recover_hung - Function to detect and recover hung queues
+ * @pf:  pointer to PF struct
+ *
+ * The LAN VSI has a netdev and the netdev has TX queues. This function
+ * checks each of those TX queues and, if a queue is hung, triggers
+ * recovery by issuing a SW interrupt.
+ **/
+static void i40e_detect_recover_hung(struct i40e_pf *pf)
+{
+       struct net_device *netdev;
+       struct i40e_vsi *vsi;
+       int i;
+
+       /* Only for LAN VSI */
+       vsi = pf->vsi[pf->lan_vsi];
+
+       if (!vsi)
+               return;
+
+       /* Make sure the VSI state is not DOWN/RECOVERY_PENDING */
+       if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+           test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+               return;
+
+       /* Make sure type is MAIN VSI */
+       if (vsi->type != I40E_VSI_MAIN)
+               return;
+
+       netdev = vsi->netdev;
+       if (!netdev)
+               return;
+
+       /* Bail out if netif_carrier is not OK */
+       if (!netif_carrier_ok(netdev))
+               return;
+
+       /* Go through the netdev's TX queues */
+       for (i = 0; i < netdev->num_tx_queues; i++) {
+               struct netdev_queue *q;
+
+               q = netdev_get_tx_queue(netdev, i);
+               if (q)
+                       i40e_detect_recover_hung_queue(i, vsi);
+       }
+}
+
+/**
+ * i40e_service_task - Run the driver's async subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40e_service_task(struct work_struct *work)
+{
+       struct i40e_pf *pf = container_of(work,
+                                         struct i40e_pf,
+                                         service_task);
+       unsigned long start_time = jiffies;
+
+       /* don't bother with service tasks if a reset is in progress */
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+           test_bit(__I40E_SUSPENDED, &pf->state)) {
+               i40e_service_event_complete(pf);
+               return;
+       }
+
+       i40e_detect_recover_hung(pf);
+       i40e_sync_filters_subtask(pf);
+       i40e_reset_subtask(pf);
+       i40e_handle_mdd_event(pf);
+       i40e_vc_process_vflr_event(pf);
+       i40e_watchdog_subtask(pf);
+       i40e_fdir_reinit_subtask(pf);
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+       i40e_sync_vxlan_filters_subtask(pf);
+
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
+       i40e_clean_adminq_subtask(pf);
+
+       i40e_service_event_complete(pf);
+
+       /* If the tasks have taken longer than one timer cycle or there
+        * is more work to be done, reschedule the service task now
+        * rather than wait for the timer to tick again.
+        */
+       if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
+           test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)            ||
+           test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)               ||
+           test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+               i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_service_timer - timer callback
+ * @data: pointer to PF struct
+ **/
+static void i40e_service_timer(unsigned long data)
+{
+       struct i40e_pf *pf = (struct i40e_pf *)data;
+
+       mod_timer(&pf->service_timer,
+                 round_jiffies(jiffies + pf->service_timer_period));
+       i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+
+       switch (vsi->type) {
+       case I40E_VSI_MAIN:
+               vsi->alloc_queue_pairs = pf->num_lan_qps;
+               vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+                                     I40E_REQ_DESCRIPTOR_MULTIPLE);
+               if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+                       vsi->num_q_vectors = pf->num_lan_msix;
+               else
+                       vsi->num_q_vectors = 1;
+
+               break;
+
+       case I40E_VSI_FDIR:
+               vsi->alloc_queue_pairs = 1;
+               vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
+                                     I40E_REQ_DESCRIPTOR_MULTIPLE);
+               vsi->num_q_vectors = 1;
+               break;
+
+       case I40E_VSI_VMDQ2:
+               vsi->alloc_queue_pairs = pf->num_vmdq_qps;
+               vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+                                     I40E_REQ_DESCRIPTOR_MULTIPLE);
+               vsi->num_q_vectors = pf->num_vmdq_msix;
+               break;
+
+       case I40E_VSI_SRIOV:
+               vsi->alloc_queue_pairs = pf->num_vf_qps;
+               vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+                                     I40E_REQ_DESCRIPTOR_MULTIPLE);
+               break;
+
+#ifdef I40E_FCOE
+       case I40E_VSI_FCOE:
+               vsi->alloc_queue_pairs = pf->num_fcoe_qps;
+               vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+                                     I40E_REQ_DESCRIPTOR_MULTIPLE);
+               vsi->num_q_vectors = pf->num_fcoe_msix;
+               break;
+
+#endif /* I40E_FCOE */
+       default:
+               WARN_ON(1);
+               return -ENODATA;
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
+ * @vsi: pointer to the VSI being configured
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ **/
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+{
+       int size;
+       int ret = 0;
+
+       /* allocate memory for both Tx and Rx ring pointers */
+       size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+       vsi->tx_rings = kzalloc(size, GFP_KERNEL);
+       if (!vsi->tx_rings)
+               return -ENOMEM;
+       vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+       if (alloc_qvectors) {
+               /* allocate memory for q_vector pointers */
+               size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
+               vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+               if (!vsi->q_vectors) {
+                       ret = -ENOMEM;
+                       goto err_vectors;
+               }
+       }
+       return ret;
+
+err_vectors:
+       kfree(vsi->tx_rings);
+       return ret;
+}
+
+/**
+ * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * On error: returns error code (negative)
+ * On success: returns the vsi index in PF (non-negative)
+ **/
+int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
+{
+       int ret = -ENODEV;
+       struct i40e_vsi *vsi;
+       int vsi_idx;
+       int i;
+
+       /* Need to protect the allocation of the VSIs at the PF level */
+       mutex_lock(&pf->switch_mutex);
+
+       /* VSI list may be fragmented if VSI creation/destruction has
+        * been happening.  We can afford to do a quick scan to look
+        * for any free VSIs in the list.
+        *
+        * find next empty vsi slot, looping back around if necessary
+        */
+       i = pf->next_vsi;
+       while (i < pf->num_alloc_vsi && pf->vsi[i])
+               i++;
+       if (i >= pf->num_alloc_vsi) {
+               i = 0;
+               while (i < pf->next_vsi && pf->vsi[i])
+                       i++;
+       }
+
+       if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
+               vsi_idx = i;             /* Found one! */
+       } else {
+               ret = -ENODEV;
+               goto unlock_pf;  /* out of VSI slots! */
+       }
+       pf->next_vsi = ++i;
+
+       vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
+       if (!vsi) {
+               ret = -ENOMEM;
+               goto unlock_pf;
+       }
+       vsi->type = type;
+       vsi->back = pf;
+       set_bit(__I40E_DOWN, &vsi->state);
+       vsi->flags = 0;
+       vsi->idx = vsi_idx;
+       vsi->rx_itr_setting = pf->rx_itr_default;
+       vsi->tx_itr_setting = pf->tx_itr_default;
+       vsi->int_rate_limit = 0;
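+       /* the MAIN VSI uses the PF-wide LUT size; all other VSI types
+        * use the hardware's smaller 64-entry per-VSI LUT
+        */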
+       vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
+                                                       pf->rss_table_size : 64;
+       vsi->netdev_registered = false;
+       vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
+       INIT_LIST_HEAD(&vsi->mac_filter_list);
+       vsi->irqs_ready = false;
+
+       ret = i40e_set_num_rings_in_vsi(vsi);
+       if (ret)
+               goto err_rings;
+
+       vsi->block_tx_timeout = false;
+
+       ret = i40e_vsi_alloc_arrays(vsi, true);
+       if (ret)
+               goto err_rings;
+
+       /* Setup default MSIX irq handler for VSI */
+       i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+
+       /* Initialize VSI lock */
+       spin_lock_init(&vsi->mac_filter_list_lock);
+       pf->vsi[vsi_idx] = vsi;
+       ret = vsi_idx;
+       goto unlock_pf;
+
+err_rings:
+       pf->next_vsi = i - 1;
+       kfree(vsi);
+unlock_pf:
+       mutex_unlock(&pf->switch_mutex);
+       return ret;
+}
+
+/**
+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
+ * @vsi: pointer to the VSI being cleaned
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ **/
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+{
+       /* free the ring and vector containers */
+       if (free_qvectors) {
+               kfree(vsi->q_vectors);
+               vsi->q_vectors = NULL;
+       }
+       kfree(vsi->tx_rings);
+       vsi->tx_rings = NULL;
+       vsi->rx_rings = NULL;
+}
+
+/**
+ * i40e_vsi_clear - Deallocate the VSI provided
+ * @vsi: the VSI being un-configured
+ **/
+static int i40e_vsi_clear(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf;
+
+       if (!vsi)
+               return 0;
+
+       if (!vsi->back)
+               goto free_vsi;
+       pf = vsi->back;
+
+       mutex_lock(&pf->switch_mutex);
+       if (!pf->vsi[vsi->idx]) {
+               dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
+                       vsi->idx, vsi->idx, vsi, vsi->type);
+               goto unlock_vsi;
+       }
+
+       if (pf->vsi[vsi->idx] != vsi) {
+               dev_err(&pf->pdev->dev,
+                       "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
+                       pf->vsi[vsi->idx]->idx,
+                       pf->vsi[vsi->idx],
+                       pf->vsi[vsi->idx]->type,
+                       vsi->idx, vsi, vsi->type);
+               goto unlock_vsi;
+       }
+
+       /* updates the PF for this cleared vsi */
+       i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+       i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+
+       i40e_vsi_free_arrays(vsi, true);
+
+       pf->vsi[vsi->idx] = NULL;
+       if (vsi->idx < pf->next_vsi)
+               pf->next_vsi = vsi->idx;
+
+unlock_vsi:
+       mutex_unlock(&pf->switch_mutex);
+free_vsi:
+       kfree(vsi);
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+       int i;
+
+       if (vsi->tx_rings && vsi->tx_rings[0]) {
+               for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+                       kfree_rcu(vsi->tx_rings[i], rcu);
+                       vsi->tx_rings[i] = NULL;
+                       vsi->rx_rings[i] = NULL;
+               }
+       }
+}
+
+/**
+ * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_alloc_rings(struct i40e_vsi *vsi)
+{
+       struct i40e_ring *tx_ring, *rx_ring;
+       struct i40e_pf *pf = vsi->back;
+       int i;
+
+       /* Set basic values in the rings to be used later during open() */
+       for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+               /* allocate space for both Tx and Rx in one shot */
+               tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+               if (!tx_ring)
+                       goto err_out;
+
+               tx_ring->queue_index = i;
+               tx_ring->reg_idx = vsi->base_queue + i;
+               tx_ring->ring_active = false;
+               tx_ring->vsi = vsi;
+               tx_ring->netdev = vsi->netdev;
+               tx_ring->dev = &pf->pdev->dev;
+               tx_ring->count = vsi->num_desc;
+               tx_ring->size = 0;
+               tx_ring->dcb_tc = 0;
+               vsi->tx_rings[i] = tx_ring;
+
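+               /* the Rx ring is the second element of the two-ring
+                * block allocated above, so it needs no separate kzalloc
+                */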
+               rx_ring = &tx_ring[1];
+               rx_ring->queue_index = i;
+               rx_ring->reg_idx = vsi->base_queue + i;
+               rx_ring->ring_active = false;
+               rx_ring->vsi = vsi;
+               rx_ring->netdev = vsi->netdev;
+               rx_ring->dev = &pf->pdev->dev;
+               rx_ring->count = vsi->num_desc;
+               rx_ring->size = 0;
+               rx_ring->dcb_tc = 0;
+               if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
+                       set_ring_16byte_desc_enabled(rx_ring);
+               else
+                       clear_ring_16byte_desc_enabled(rx_ring);
+               vsi->rx_rings[i] = rx_ring;
+       }
+
+       return 0;
+
+err_out:
+       i40e_vsi_clear_rings(vsi);
+       return -ENOMEM;
+}
+#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT)
+
+/**
+ * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
+ * @pf: board private structure
+ * @vectors: the number of MSI-X vectors to request
+ *
+ * Returns the number of vectors reserved, or error
+ **/
+static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
+{
+       int err = 0;
+
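+       /* pci_enable_msix() returns 0 on success, a negative errno on
+        * hard failure, or a positive count of the vectors actually
+        * available, which we feed back in as the next request size
+        */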
+       while (vectors >= I40E_MIN_MSIX) {
+               err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
+               if (err == 0) {
+                       /* good to go */
+                       break;
+               } else if (err < 0) {
+                       /* total failure */
+                       dev_info(&pf->pdev->dev,
+                                "MSI-X vector reservation failed: %d\n", err);
+                       vectors = 0;
+                       break;
+               }
+               /* err > 0 is the hint for retry */
+               dev_info(&pf->pdev->dev,
+                        "MSI-X vectors wanted %d, retrying with %d\n",
+                        vectors, err);
+               vectors = err;
+       }
+
+       if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+               dev_info(&pf->pdev->dev,
+                        "Couldn't get enough vectors, only %d available\n",
+                        vectors);
+               vectors = 0;
+       }
+
+       return vectors;
+}
+
+/**
+ * i40e_init_msix - Setup the MSIX capability
+ * @pf: board private structure
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns the number of vectors reserved or negative on failure
+ **/
+static int i40e_init_msix(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       int vectors_left;
+       int v_budget, i;
+       int v_actual;
+
+       if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+               return -ENODEV;
+
+       /* The number of vectors we'll request will be comprised of:
+        *   - Add 1 for "other" cause for Admin Queue events, etc.
+        *   - The number of LAN queue pairs
+        *      - This takes into account queues for each TC in DCB mode.
+        *      - Queues being used for RSS.
+        *              We don't need as many as max_rss_size vectors;
+        *              use rss_size instead in the calculation since that
+        *              is governed by the number of CPUs in the system.
+        *      - assumes symmetric Tx/Rx pairing
+        *   - The number of VMDq pairs
+#ifdef I40E_FCOE
+        *   - The number of FCOE qps.
+#endif
+        * Once we count this up, try the request.
+        *
+        * If we can't get what we want, we'll simplify to nearly nothing
+        * and try again.  If that still fails, we punt.
+        */
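+       /* Illustrative budget (an example, not a guarantee): on an
+        * 8-CPU system with FD sideband enabled and 8 VMDq VSIs of
+        * 2 queue pairs each, this works out to 1 + 8 + 1 + 16 = 26
+        * vectors, assuming enough vectors remain at each step.
+        */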
+       vectors_left = hw->func_caps.num_msix_vectors;
+       v_budget = 0;
+
+       /* reserve one vector for miscellaneous handler */
+       if (vectors_left) {
+               v_budget++;
+               vectors_left--;
+       }
+
+       /* reserve vectors for the main PF traffic queues */
+       pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+       vectors_left -= pf->num_lan_msix;
+       v_budget += pf->num_lan_msix;
+
+       /* reserve one vector for sideband flow director */
+       if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+               if (vectors_left) {
+                       v_budget++;
+                       vectors_left--;
+               } else {
+                       pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               }
+       }
+
+#ifdef I40E_FCOE
+       /* can we reserve enough for FCoE? */
+       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+               if (!vectors_left)
+                       pf->num_fcoe_msix = 0;
+               else if (vectors_left >= pf->num_fcoe_qps)
+                       pf->num_fcoe_msix = pf->num_fcoe_qps;
+               else
+                       pf->num_fcoe_msix = 1;
+               v_budget += pf->num_fcoe_msix;
+               vectors_left -= pf->num_fcoe_msix;
+       }
+
+#endif
+       /* any vectors left over go for VMDq support */
+       if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+               int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
+               int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
+
+               /* if we're short on vectors for what's desired, we limit
+                * the queues per VMDq.  If this is still more than are
+                * available, the user will need to change the number of
+                * queues/vectors used by the PF later with the ethtool
+                * channels command
+                */
+               if (vmdq_vecs < vmdq_vecs_wanted)
+                       pf->num_vmdq_qps = 1;
+               pf->num_vmdq_msix = pf->num_vmdq_qps;
+
+               v_budget += vmdq_vecs;
+               vectors_left -= vmdq_vecs;
+       }
+
+       pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
+                                  GFP_KERNEL);
+       if (!pf->msix_entries)
+               return -ENOMEM;
+
+       for (i = 0; i < v_budget; i++)
+               pf->msix_entries[i].entry = i;
+       v_actual = i40e_reserve_msix_vectors(pf, v_budget);
+
+       if (v_actual != v_budget) {
+               /* If we have limited resources, we will start with no vectors
+                * for the special features and then allocate vectors to some
+                * of these features based on the policy and at the end disable
+                * the features that did not get any vectors.
+                */
+#ifdef I40E_FCOE
+               pf->num_fcoe_qps = 0;
+               pf->num_fcoe_msix = 0;
+#endif
+               pf->num_vmdq_msix = 0;
+       }
+
+       if (v_actual < I40E_MIN_MSIX) {
+               pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+               kfree(pf->msix_entries);
+               pf->msix_entries = NULL;
+               return -ENODEV;
+
+       } else if (v_actual == I40E_MIN_MSIX) {
+               /* Adjust for minimal MSIX use */
+               pf->num_vmdq_vsis = 0;
+               pf->num_vmdq_qps = 0;
+               pf->num_lan_qps = 1;
+               pf->num_lan_msix = 1;
+
+       } else if (v_actual != v_budget) {
+               int vec;
+
+               /* reserve the misc vector */
+               vec = v_actual - 1;
+
+               /* Scale vector usage down */
+               pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
+               pf->num_vmdq_vsis = 1;
+               pf->num_vmdq_qps = 1;
+               pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+
+               /* partition out the remaining vectors */
+               switch (vec) {
+               case 2:
+                       pf->num_lan_msix = 1;
+                       break;
+               case 3:
+#ifdef I40E_FCOE
+                       /* give one vector to FCoE */
+                       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+                               pf->num_lan_msix = 1;
+                               pf->num_fcoe_msix = 1;
+                       }
+#else
+                       pf->num_lan_msix = 2;
+#endif
+                       break;
+               default:
+#ifdef I40E_FCOE
+                       /* give one vector to FCoE */
+                       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+                               pf->num_fcoe_msix = 1;
+                               vec--;
+                       }
+#endif
+                       /* give the rest to the PF */
+                       pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
+                       break;
+               }
+       }
+
+       if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+                                       (pf->num_vmdq_msix == 0)) {
+               dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
+               pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+       }
+#ifdef I40E_FCOE
+
+       if ((pf->flags & I40E_FLAG_FCOE_ENABLED) &&
+                                       (pf->num_fcoe_msix == 0)) {
+               dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
+               pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
+       }
+#endif
+       return v_actual;
+}
+#endif
+
+/**
+ * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+       struct i40e_q_vector *q_vector;
+#ifdef HAVE_IRQ_AFFINITY_HINT
+#endif
+
+       /* allocate q_vector */
+       q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+       if (!q_vector)
+               return -ENOMEM;
+
+       q_vector->vsi = vsi;
+       q_vector->v_idx = v_idx;
+#ifdef HAVE_IRQ_AFFINITY_HINT
+       cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+#endif
+       if (vsi->netdev) {
+               netif_napi_add(vsi->netdev, &q_vector->napi,
+                              i40e_napi_poll, NAPI_POLL_WEIGHT);
+               napi_hash_add(&q_vector->napi);
+       }
+
+       q_vector->rx.latency_range = I40E_LOW_LATENCY;
+       q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
+       /* tie q_vector and vsi together */
+       vsi->q_vectors[v_idx] = q_vector;
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       int v_idx, num_q_vectors;
+       int err;
+
+       /* if not MSIX, give the one vector only to the LAN VSI */
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+               num_q_vectors = vsi->num_q_vectors;
+       else if (vsi == pf->vsi[pf->lan_vsi])
+               num_q_vectors = 1;
+       else
+               return -EINVAL;
+
+       for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+               err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+               if (err)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       while (v_idx--)
+               i40e_free_q_vector(vsi, v_idx);
+
+       return err;
+}
+
+/**
+ * i40e_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ **/
+static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
+{
+       int vectors = 0;
+       ssize_t size;
+
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT)
+               vectors = i40e_init_msix(pf);
+#else
+               vectors = -1;
+#endif
+               if (vectors < 0) {
+                       pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
+#ifdef I40E_FCOE
+                                      I40E_FLAG_FCOE_ENABLED   |
+#endif
+                                      I40E_FLAG_RSS_ENABLED    |
+                                      I40E_FLAG_DCB_CAPABLE    |
+                                      I40E_FLAG_SRIOV_ENABLED  |
+                                      I40E_FLAG_FD_SB_ENABLED  |
+                                      I40E_FLAG_FD_ATR_ENABLED |
+                                      I40E_FLAG_VMDQ_ENABLED);
+
+                       /* rework the queue expectations without MSIX */
+                       i40e_determine_queue_usage(pf);
+               }
+       }
+
+       if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+           (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+               dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
+#ifndef I40E_LEGACY_INTERRUPT
+               vectors = pci_enable_msi(pf->pdev);
+#else
+               vectors = -1;
+#endif
+               if (vectors < 0) {
+                       dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
+                                vectors);
+                       pf->flags &= ~I40E_FLAG_MSI_ENABLED;
+               }
+               vectors = 1;  /* one MSI or Legacy vector */
+       }
+
+       if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+               dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
+
+       /* set up vector assignment tracking */
+       size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
+       pf->irq_pile = kzalloc(size, GFP_KERNEL);
+       if (!pf->irq_pile) {
+               dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+               return -ENOMEM;
+       }
+       pf->irq_pile->num_entries = vectors;
+       pf->irq_pile->search_hint = 0;
+
+       /* track first vector for misc interrupts, ignore return */
+       (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+
+       return 0;
+}
+
+/**
+ * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0, which is used to manage the
+ * non-queue interrupts, e.g. AdminQ and errors.  This is not used
+ * when in MSI or Legacy interrupt mode.
+ **/
+static int i40e_setup_misc_vector(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       int err = 0;
+
+       /* Only request the irq if this is the first time through, and
+        * not when we're rebuilding after a Reset
+        */
+       if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+               err = request_irq(pf->msix_entries[0].vector,
+                                 i40e_intr, 0, pf->int_name, pf);
+               if (err) {
+                       dev_info(&pf->pdev->dev,
+                                "request_irq for %s failed: %d\n",
+                                pf->int_name, err);
+                       return -EFAULT;
+               }
+       }
+
+       i40e_enable_misc_int_causes(pf);
+
+       /* associate no queues with the misc vector */
+       wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
+       wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+
+       i40e_flush(hw);
+
+       i40e_irq_dynamic_enable_icr0(pf);
+
+       return err;
+}
+
+/**
+ * i40e_config_rss_reg - Configure the RSS hash key and lookup table registers
+ * @pf: board private structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       struct i40e_hw *hw = &pf->hw;
+       u32 *seed_dw = (u32 *)seed;
+       u32 current_queue = 0;
+       u32 lut = 0;
+       int i, j;
+
+       /* Fill out hash function seed */
+       for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+               wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+
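+       /* populate the LUT: each 32-bit register packs four 8-bit queue
+        * indices, assigned round-robin over the first rss_size queues
+        */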
+       for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+               lut = 0;
+               for (j = 0; j < 4; j++) {
+                       if (current_queue == vsi->rss_size)
+                               current_queue = 0;
+                       lut |= ((current_queue) << (8 * j));
+                       current_queue++;
+               }
+               wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
+       }
+       i40e_flush(hw);
+
+       return 0;
+}
+
+/**
+ * i40e_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_config_rss(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       u8 seed[I40E_HKEY_ARRAY_SIZE];
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_val;
+       u64 hena;
+
+       netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+
+       /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
+       hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+               ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+       hena |= i40e_pf_get_default_rss_hena(pf);
+
+       wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+       wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+
+       vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
+       /* Determine the RSS table size based on the hardware capabilities */
+       reg_val = rd32(hw, I40E_PFQF_CTL_0);
+       reg_val = (pf->rss_table_size == 512) ?
+                       (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+                       (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
+       wr32(hw, I40E_PFQF_CTL_0, reg_val);
+
+       return i40e_config_rss_reg(pf, seed);
+}
+
+/**
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
+ *
+ * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ **/
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+{
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       int new_rss_size;
+
+       if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+               return 0;
+
+       new_rss_size = min_t(int, queue_count, pf->rss_size_max);
+
+       if (queue_count != vsi->num_queue_pairs) {
+               vsi->req_queue_pairs = queue_count;
+               i40e_prep_for_reset(pf);
+
+               pf->rss_size = new_rss_size;
+
+               i40e_reset_and_rebuild(pf, true);
+               i40e_config_rss(pf);
+       }
+       dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
+       return pf->rss_size;
+}
+
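+/* Usage note (illustrative, assuming the standard ethtool channel
+ * interface): this path is typically reached when an administrator
+ * resizes the queue set, e.g.
+ *
+ *   ethtool -L eth0 combined 8
+ *
+ * which preps for reset, rebuilds with the new count, and then
+ * reprograms RSS via i40e_config_rss().
+ */
+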
+/**
+ * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
+{
+       i40e_status status;
+       bool min_valid, max_valid;
+       u32 max_bw, min_bw;
+
+       status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+                       &min_valid, &max_valid);
+
+       if (!status) {
+               if (min_valid)
+                       pf->min_bw = min_bw;
+               if (max_valid)
+                       pf->max_bw = max_bw;
+       }
+
+       return status;
+}
+
+/**
+ * i40e_set_partition_bw_setting - Set BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+{
+       struct i40e_aqc_configure_partition_bw_data bw_data;
+       i40e_status status;
+
+       /* Set the valid bit for this PF */
+       bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
+       bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+       bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
+
+       /* Set the new bandwidths */
+       status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
+
+       return status;
+}
+
+/**
+ * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+{
+       /* Commit temporary BW setting to permanent NVM image */
+       enum i40e_admin_queue_err last_aq_status;
+       i40e_status ret;
+       u16 nvm_word;
+
+       if (pf->hw.partition_id != 1) {
+               dev_info(&pf->pdev->dev,
+                        "Commit BW only works on partition 1! This is partition %d\n",
+                        pf->hw.partition_id);
+               ret = I40E_NOT_SUPPORTED;
+               goto bw_commit_out;
+       }
+
+       /* Acquire NVM for read access */
+       ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+       last_aq_status = pf->hw.aq.asq_last_status;
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Cannot acquire NVM for read access, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
+               goto bw_commit_out;
+       }
+
+       /* Read word 0x10 of NVM - SW compatibility word 1 */
+       ret = i40e_aq_read_nvm(&pf->hw,
+                              I40E_SR_NVM_CONTROL_WORD,
+                              0x10, sizeof(nvm_word), &nvm_word,
+                              false, NULL);
+       /* Save off last admin queue command status before releasing
+        * the NVM
+        */
+       last_aq_status = pf->hw.aq.asq_last_status;
+       i40e_release_nvm(&pf->hw);
+       if (ret) {
+               dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
+               goto bw_commit_out;
+       }
+
+       /* Wait a bit for NVM release to complete */
+       msleep(50);
+
+       /* Acquire NVM for write access */
+       ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
+       last_aq_status = pf->hw.aq.asq_last_status;
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Cannot acquire NVM for write access, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
+               goto bw_commit_out;
+       }
+       /* Write it back out unchanged to initiate update NVM,
+        * which will force a write of the shadow (alt) RAM to
+        * the NVM - thus storing the bandwidth values permanently.
+        */
+       ret = i40e_aq_update_nvm(&pf->hw,
+                                I40E_SR_NVM_CONTROL_WORD,
+                                0x10, sizeof(nvm_word),
+                                &nvm_word, true, NULL);
+       /* Save off last admin queue command status before releasing
+        * the NVM
+        */
+       last_aq_status = pf->hw.aq.asq_last_status;
+       i40e_release_nvm(&pf->hw);
+       if (ret)
+               dev_info(&pf->pdev->dev,
+                        "BW settings NOT SAVED, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
+bw_commit_out:
+
+       return ret;
+}
+
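+/* Illustrative summary of the commit sequence above (no additional
+ * driver logic): the alternate-RAM BW values only persist across a
+ * power cycle once an update of SW compatibility word 0x10 forces the
+ * shadow (alt) RAM to be flushed to flash:
+ *
+ *   acquire NVM (READ)  -> aq_read_nvm(word 0x10)              -> release
+ *   acquire NVM (WRITE) -> aq_update_nvm(word 0x10, unchanged) -> release
+ */
+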
+/**
+ * i40e_sw_init - Initialize general software structures (struct i40e_pf)
+ * @pf: board private structure to initialize
+ *
+ * i40e_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+#ifdef HAVE_CONFIG_HOTPLUG
+static int __devinit i40e_sw_init(struct i40e_pf *pf)
+#else
+static int i40e_sw_init(struct i40e_pf *pf)
+#endif
+{
+       int err = 0;
+       int size;
+
+       pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
+                               (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+       pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
+       if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
+               if (I40E_DEBUG_USER & debug)
+                       pf->hw.debug_mask = debug;
+               pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
+                                               I40E_DEFAULT_MSG_ENABLE);
+       }
+
+       /* Set default capability flags */
+       pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
+                   I40E_FLAG_MSI_ENABLED     |
+                   I40E_FLAG_LINK_POLLING_ENABLED |
+                   I40E_FLAG_MSIX_ENABLED;
+
+#ifdef HAVE_IOMMU_PRESENT
+       if (iommu_present(&pci_bus_type))
+               pf->flags |= I40E_FLAG_RX_PS_ENABLED;
+       else
+#endif
+               pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
+
+       /* Set default ITR */
+       pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
+       pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+       /* Depending on PF configurations, it is possible that the RSS
+        * maximum might end up larger than the available queues
+        */
+       pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
+       pf->rss_size = 1;
+       pf->rss_table_size = pf->hw.func_caps.rss_table_size;
+       pf->rss_size_max = min_t(int, pf->rss_size_max,
+                                pf->hw.func_caps.num_tx_qp);
+
+       if (pf->hw.func_caps.rss) {
+               pf->flags |= I40E_FLAG_RSS_ENABLED;
+               pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
+       }
+       /* MFP mode enabled */
+       if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
+               pf->flags |= I40E_FLAG_MFP_ENABLED;
+
+               dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+               if (i40e_get_partition_bw_setting(pf)) {
+                       dev_warn(&pf->pdev->dev,
+                                "Could not get partition bw settings\n");
+               } else {
+                       dev_info(&pf->pdev->dev,
+                                "Partition BW Min = %8.8x, Max = %8.8x\n",
+                                pf->min_bw, pf->max_bw);
+
+                       /* nudge the Tx scheduler */
+                       i40e_set_partition_bw_setting(pf);
+               }
+       }
+
+       if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+           (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+               pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+               pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+               if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+                   pf->hw.num_partitions > 1)
+                       dev_info(&pf->pdev->dev,
+                                "Flow Director Sideband mode Disabled in MFP mode\n");
+               else
+                       pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+               pf->fdir_pf_filter_count =
+                                pf->hw.func_caps.fd_filters_guaranteed;
+               pf->hw.fdir_shared_filter_count =
+                                pf->hw.func_caps.fd_filters_best_effort;
+       }
+
+       if (pf->hw.func_caps.vmdq) {
+               pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
+               pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+               pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
+       }
+
+#ifdef I40E_FCOE
+       i40e_init_pf_fcoe(pf);
+
+#endif /* I40E_FCOE */
+#ifdef CONFIG_PCI_IOV
+       if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+               pf->num_req_vfs = 0;
+               if (max_vfs[pf->instance] > 0 &&
+                   max_vfs[pf->instance] <= pf->hw.func_caps.num_vfs) {
+                       pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+                       /* assign number of SR-IOV VFs */
+                       pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+                       pf->num_req_vfs = max_vfs[pf->instance];
+               } else if (max_vfs[pf->instance] == 0) {
+                       dev_info(&pf->pdev->dev,
+                                "SR-IOV is disabled: module parameter max_vfs is set to %d\n",
+                                max_vfs[pf->instance]);
+               } else if (max_vfs[pf->instance] != -1) {
+                       dev_err(&pf->pdev->dev,
+                               "Module Parameter max_vfs value %d is out of range. Maximum value for the device: %d - resetting to zero\n",
+                               max_vfs[pf->instance],
+                               pf->hw.func_caps.num_vfs);
+               }
+#else
+               pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+               pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+               pf->num_req_vfs = min_t(int,
+                                       pf->hw.func_caps.num_vfs,
+                                       I40E_MAX_VF_COUNT);
+#endif /* HAVE_SRIOV_CONFIGURE */
+       }
+#endif /* CONFIG_PCI_IOV */
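+
+       /* Example for the SR-IOV setup above (hypothetical values):
+        * without a sriov_configure interface, VFs are requested at
+        * module load time, one array entry per port, e.g.
+        *   modprobe i40e max_vfs=4,0
+        * enables four VFs on the first port and none on the second.
+        */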
+       pf->eeprom_version = 0xDEAD;
+       pf->lan_veb = I40E_NO_VEB;
+       pf->lan_vsi = I40E_NO_VSI;
+
+       /* By default FW has this off for performance reasons */
+       pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
+       /* set up queue assignment tracking */
+       size = sizeof(struct i40e_lump_tracking)
+               + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
+       pf->qp_pile = kzalloc(size, GFP_KERNEL);
+       if (!pf->qp_pile) {
+               err = -ENOMEM;
+               goto sw_init_done;
+       }
+       pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
+       pf->qp_pile->search_hint = 0;
+
+       pf->tx_timeout_recovery_level = 1;
+
+       mutex_init(&pf->switch_mutex);
+
+sw_init_done:
+       return err;
+}
+
+/**
+ * i40e_set_ntuple - set the ntuple feature flag and take action
+ * @pf: board private structure to initialize
+ * @features: the feature set that the stack is suggesting
+ *
+ * returns a bool to indicate if reset needs to happen
+ **/
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
+{
+       bool need_reset = false;
+
+       /* Check if Flow Director n-tuple support was enabled or disabled.  If
+        * the state changed, we need to reset.
+        */
+       if (features & NETIF_F_NTUPLE) {
+               /* Enable filters and mark for reset */
+               if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+                       need_reset = true;
+               pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+       } else {
+               /* turn off filters, mark for reset and clear SW filter list */
+               if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+                       need_reset = true;
+                       i40e_fdir_filter_exit(pf);
+               }
+               pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               /* reset fd counters */
+               pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
+               pf->fdir_pf_active_filters = 0;
+               pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                       dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+               /* if ATR was auto disabled it can be re-enabled. */
+               if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+       }
+       return need_reset;
+}
+
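+/* Illustrative user-space trigger for the flag handling above
+ * (assuming the standard ethtool feature interface):
+ *
+ *   ethtool -K eth0 ntuple on    # enable FD sideband, marks for reset
+ *   ethtool -K eth0 ntuple off   # drop SB filters, ATR may re-enable
+ */
+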
+#ifdef HAVE_NDO_SET_FEATURES
+/**
+ * i40e_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ **/
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+static int i40e_set_features(struct net_device *netdev, u32 features)
+#else
+static int i40e_set_features(struct net_device *netdev,
+                            netdev_features_t features)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       bool need_reset;
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+#else
+       if (features & NETIF_F_HW_VLAN_RX)
+#endif
+               i40e_vlan_stripping_enable(vsi);
+       else
+               i40e_vlan_stripping_disable(vsi);
+
+       need_reset = i40e_set_ntuple(pf, features);
+
+       if (need_reset)
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+
+       return 0;
+}
+
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+/**
+ * i40e_get_vxlan_port_idx - Look up a UDP port that may be offloaded for Rx
+ * @pf: board private structure
+ * @port: The UDP port to look up
+ *
+ * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
+ **/
+static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
+{
+       u8 i;
+
+       for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+               if (pf->vxlan_ports[i] == port)
+                       return i;
+       }
+
+       return i;
+}
+
+/**
+ * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void i40e_add_vxlan_port(struct net_device *netdev,
+                               sa_family_t sa_family, __be16 port)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       u8 next_idx;
+       u8 idx;
+
+       if (sa_family == AF_INET6)
+               return;
+
+       idx = i40e_get_vxlan_port_idx(pf, port);
+
+       /* Check if port already exists */
+       if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+               netdev_info(netdev, "vxlan port %d already offloaded\n",
+                           ntohs(port));
+               return;
+       }
+
+       /* Now check if there is space to add the new port */
+       next_idx = i40e_get_vxlan_port_idx(pf, 0);
+
+       if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+               netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
+                           ntohs(port));
+               return;
+       }
+
+       /* New port: add it and mark its index in the bitmap */
+       pf->vxlan_ports[next_idx] = port;
+       pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
+       pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+}
+
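+/* These notifications typically fire when a VXLAN device is created
+ * on top of this port, e.g. (illustrative iproute2 invocation):
+ *
+ *   ip link add vxlan0 type vxlan id 42 dstport 4789 dev eth0
+ *
+ * which leads to an add callback for UDP port 4789.
+ */
+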
+/**
+ * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void i40e_del_vxlan_port(struct net_device *netdev,
+                               sa_family_t sa_family, __be16 port)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       u8 idx;
+
+       if (sa_family == AF_INET6)
+               return;
+
+       idx = i40e_get_vxlan_port_idx(pf, port);
+
+       /* Check if port already exists */
+       if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+               /* if port exists, set it to 0 (mark for deletion)
+                * and make it pending
+                */
+               pf->vxlan_ports[idx] = 0;
+               pf->pending_vxlan_bitmap |= BIT_ULL(idx);
+               pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+       } else {
+               netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
+                           ntohs(port));
+       }
+}
+
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#ifdef HAVE_NDO_GET_PHYS_PORT_ID
+static int i40e_get_phys_port_id(struct net_device *netdev,
+                                struct netdev_phys_item_id *ppid)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+
+       if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
+               return -EOPNOTSUPP;
+
+       ppid->id_len = min_t(int, sizeof(hw->mac.port_addr),
+                            sizeof(ppid->id));
+       memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
+
+       return 0;
+}
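+
+/* The ID copied out above is what user space reads back from
+ * /sys/class/net/<iface>/phys_port_id (illustrative note).
+ */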
+
+#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                            struct net_device *dev,
+                            const unsigned char *addr,
+#ifdef HAVE_NDO_FDB_ADD_VID
+                            u16 vid,
+#endif
+                            u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm,
+                            struct net_device *dev,
+                            unsigned char *addr,
+#ifdef HAVE_NDO_FDB_ADD_VID
+                            u16 vid,
+#endif
+                            u16 flags)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_pf *pf = np->vsi->back;
+       int err = 0;
+
+       if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+               return -EOPNOTSUPP;
+
+       /* Hardware does not support aging addresses, so if an
+        * ndm_state is given only allow permanent addresses
+        */
+       if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+               netdev_info(dev, "FDB only supports static addresses\n");
+               return -EINVAL;
+       }
+
+       if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+               err = dev_uc_add_excl(dev, addr);
+       else if (is_multicast_ether_addr(addr))
+               err = dev_mc_add_excl(dev, addr);
+       else
+               err = -EINVAL;
+
+       /* Only return duplicate errors if NLM_F_EXCL is set */
+       if (err == -EEXIST && !(flags & NLM_F_EXCL))
+               err = 0;
+
+       return err;
+}
+
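+/* Illustrative user-space trigger for the FDB add handler above
+ * (iproute2):
+ *
+ *   bridge fdb add 52:54:00:12:34:56 dev eth0
+ *
+ * Only static (NUD_PERMANENT) entries are accepted, per the check
+ * above; the MAC address here is a hypothetical example.
+ */
+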
+#ifdef HAVE_NDO_FEATURES_CHECK
+#define I40E_MAX_TUNNEL_HDR_LEN 80
+/**
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @dev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+                                            struct net_device *dev,
+                                            netdev_features_t features)
+{
+       u8 protocol = 0;
+
+       if (!skb->encapsulation)
+               return features;
+
+       /* prevent tunnel headers that are too long to offload from
+        * being sent to the hardware
+        */
+       if (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+           I40E_MAX_TUNNEL_HDR_LEN)
+               return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+       /* This is a somewhat temporary patch to prevent the driver
+        * from trying to offload tunnels it cannot support.
+        * Currently the only supported tunnel is VXLAN; this code
+        * looks like vxlan_features_check but is not the same.
+        */
+
+       switch (vlan_get_protocol(skb)) {
+       case htons(ETH_P_IP):
+               protocol = ip_hdr(skb)->protocol;
+               break;
+       case htons(ETH_P_IPV6):
+               protocol = ipv6_hdr(skb)->nexthdr;
+               break;
+       default:
+               return features;
+       }
+
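+       /* Worked example for the check below, assuming standard header
+        * sizes: for a well-formed VXLAN frame the inner MAC header
+        * starts sizeof(struct udphdr) + sizeof(struct vxlanhdr) =
+        * 8 + 8 = 16 bytes past the transport header, so its offloads
+        * are kept; any other UDP encapsulation loses checksum and GSO
+        * features here.
+        */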
+       if ((protocol != IPPROTO_UDP) ||
+           (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+            skb->inner_protocol != htons(ETH_P_TEB) ||
+            (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+             sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+               return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+       return features;
+}
+
+#endif /* HAVE_NDO_FEATURES_CHECK */
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+#ifdef HAVE_NDO_FDB_ADD_VID
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+                            struct net_device *dev,
+                            const unsigned char *addr,
+                            u16 vid)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+                            struct net_device *dev,
+                            const unsigned char *addr)
+#endif
+#else
+#ifdef HAVE_NDO_FDB_ADD_VID
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+                            struct net_device *dev,
+                            unsigned char *addr,
+                            u16 vid)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+                            struct net_device *dev,
+                            unsigned char *addr)
+#endif
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_pf *pf = np->vsi->back;
+       int err = -EOPNOTSUPP;
+
+       if (ndm->ndm_state & NUD_PERMANENT) {
+               netdev_info(dev, "FDB only supports static addresses\n");
+               return -EINVAL;
+       }
+
+       if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+               if (is_unicast_ether_addr(addr))
+                       err = dev_uc_del(dev, addr);
+               else if (is_multicast_ether_addr(addr))
+                       err = dev_mc_del(dev, addr);
+               else
+                       err = -EINVAL;
+       }
+
+       return err;
+}
+
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+                             struct netlink_callback *cb,
+                             struct net_device *dev,
+                             int idx)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_pf *pf = np->vsi->back;
+
+       if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+               idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+       return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#ifdef HAVE_BRIDGE_ATTRIBS
+/**
+ * i40e_ndo_bridge_setlink - Set the hardware bridge mode
+ * @dev: the netdev being configured
+ * @nlh: RTNL message
+ *
+ * Inserts a new hardware bridge if not already created and
+ * enables the bridging mode requested (VEB or VEPA). If the
+ * hardware bridge has already been inserted and the request is to
+ * change the mode, a PF reset is required so the components can be
+ * rebuilt with the requested bridge mode enabled.
+ **/
+#ifdef HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+static int i40e_ndo_bridge_setlink(struct net_device *dev,
+                                  struct nlmsghdr *nlh,
+                                  u16 flags)
+#else
+static int i40e_ndo_bridge_setlink(struct net_device *dev,
+                                  struct nlmsghdr *nlh)
+#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_veb *veb = NULL;
+       struct nlattr *attr, *br_spec;
+       int i, rem;
+
+       /* Only for PF VSI for now */
+       if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+               return -EOPNOTSUPP;
+
+       /* Find the HW bridge for PF VSI */
+       for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+               if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+                       veb = pf->veb[i];
+       }
+
+       br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+       if (!br_spec)
+               return -EINVAL;
+
+       nla_for_each_nested(attr, br_spec, rem) {
+               __u16 mode;
+
+               if (nla_type(attr) != IFLA_BRIDGE_MODE)
+                       continue;
+
+               mode = nla_get_u16(attr);
+               if ((mode != BRIDGE_MODE_VEPA) &&
+                   (mode != BRIDGE_MODE_VEB))
+                       return -EINVAL;
+
+               /* Insert a new HW bridge */
+               if (!veb) {
+                       veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+                                            vsi->tc_config.enabled_tc);
+                       if (veb) {
+                               veb->bridge_mode = mode;
+                               i40e_config_bridge_mode(veb);
+                       } else {
+                               /* No Bridge HW offload available */
+                               return -ENOENT;
+                       }
+                       break;
+               } else if (mode != veb->bridge_mode) {
+                       /* Existing HW bridge but different mode needs reset */
+                       veb->bridge_mode = mode;
+                       if (mode == BRIDGE_MODE_VEB)
+                               pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       else
+                               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+                       break;
+               }
+       }
+
+       return 0;
+}
+
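+/* Illustrative user-space trigger (iproute2): switching the hardware
+ * bridge mode, e.g.
+ *
+ *   bridge link set dev eth0 hwmode veb
+ *
+ * takes the PF-reset path above when a bridge already exists in the
+ * other mode.
+ */
+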
+/**
+ * i40e_ndo_bridge_getlink - Get the hardware bridge mode
+ * @skb: skb buff
+ * @pid: process id
+ * @seq: RTNL message seq #
+ * @dev: the netdev being configured
+ * @filter_mask: unused
+ * @nlflags: netlink flags passed in
+ *
+ * Return the mode in which the hardware bridge is operating,
+ * i.e. VEB or VEPA.
+ **/
+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                  struct net_device *dev,
+                                  u32 __always_unused filter_mask,
+                                  int nlflags)
+#elif defined(HAVE_BRIDGE_FILTER)
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                  struct net_device *dev,
+                                  u32 __always_unused filter_mask)
+#else
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                  struct net_device *dev)
+#endif /* NDO_BRIDGE_STUFF */
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_veb *veb = NULL;
+       int i;
+
+       /* Only for PF VSI for now */
+       if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+               return -EOPNOTSUPP;
+
+       /* Find the HW bridge for the PF VSI */
+       for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+               if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+                       veb = pf->veb[i];
+       }
+
+       if (!veb)
+               return 0;
+
+#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+                                      0, 0, nlflags, filter_mask, NULL);
+#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS)
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+                                      0, 0, nlflags);
+#elif defined(HAVE_NDO_FDB_ADD_VID) || \
+       defined NDO_BRIDGE_GETLINK_HAS_FILTER_MASK_PARAM
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+                                      0, 0);
+#else
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
+#endif /* HAVE_NDO_BRIDGE_XX */
+}
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif /* HAVE_FDB_OPS */
+
+#ifdef HAVE_NET_DEVICE_OPS
+static const struct net_device_ops i40e_netdev_ops = {
+       .ndo_open               = i40e_open,
+       .ndo_stop               = i40e_close,
+       .ndo_start_xmit         = i40e_lan_xmit_frame,
+#ifdef HAVE_NDO_GET_STATS64
+       .ndo_get_stats64        = i40e_get_netdev_stats_struct,
+#else
+       .ndo_get_stats          = i40e_get_netdev_stats_struct,
+#endif
+       .ndo_set_rx_mode        = i40e_set_rx_mode,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = i40e_set_mac,
+       .ndo_change_mtu         = i40e_change_mtu,
+#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL)
+       .ndo_do_ioctl           = i40e_ioctl,
+#endif
+       .ndo_tx_timeout         = i40e_tx_timeout,
+#ifdef HAVE_VLAN_RX_REGISTER
+       .ndo_vlan_rx_register   = i40e_vlan_rx_register,
+#endif
+       .ndo_vlan_rx_add_vid    = i40e_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = i40e_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = i40e_netpoll,
+#endif
+#ifdef HAVE_SETUP_TC
+       .ndo_setup_tc           = i40e_setup_tc,
+#endif /* HAVE_SETUP_TC */
+#ifdef I40E_FCOE
+       .ndo_fcoe_enable        = i40e_fcoe_enable,
+       .ndo_fcoe_disable       = i40e_fcoe_disable,
+#endif
+#ifdef IFLA_VF_MAX
+       .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
+       .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+       .ndo_set_vf_rate        = i40e_ndo_set_vf_bw,
+#else
+       .ndo_set_vf_tx_rate     = i40e_ndo_set_vf_bw,
+#endif
+       .ndo_get_vf_config      = i40e_ndo_get_vf_config,
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+       .ndo_set_vf_link_state  = i40e_ndo_set_vf_link_state,
+#endif
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+       .ndo_set_vf_spoofchk    = i40e_ndo_set_vf_spoofchk,
+#endif
+#endif /* IFLA_VF_MAX */
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+       .ndo_add_vxlan_port     = i40e_add_vxlan_port,
+       .ndo_del_vxlan_port     = i40e_del_vxlan_port,
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#ifdef HAVE_NDO_GET_PHYS_PORT_ID
+       .ndo_get_phys_port_id   = i40e_get_phys_port_id,
+#endif /* HAVE_NDO_GET_PHYS_PORT_ID */
+#ifdef HAVE_FDB_OPS
+       .ndo_fdb_add            = i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+       .ndo_fdb_del            = i40e_ndo_fdb_del,
+       .ndo_fdb_dump           = i40e_ndo_fdb_dump,
+#endif
+#ifdef HAVE_NDO_FEATURES_CHECK
+       .ndo_features_check     = i40e_features_check,
+#endif /* HAVE_NDO_FEATURES_CHECK */
+#ifdef HAVE_BRIDGE_ATTRIBS
+       .ndo_bridge_getlink     = i40e_ndo_bridge_getlink,
+       .ndo_bridge_setlink     = i40e_ndo_bridge_setlink,
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif /* HAVE_FDB_OPS */
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+};
+
+/* RHEL6 keeps these operations in a separate structure */
+static const struct net_device_ops_ext i40e_netdev_ops_ext = {
+       .size                   = sizeof(struct net_device_ops_ext),
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#ifdef HAVE_NDO_SET_FEATURES
+       .ndo_set_features       = i40e_set_features,
+#endif /* HAVE_NDO_SET_FEATURES */
+};
+
+#endif /* HAVE_NET_DEVICE_OPS */
+/**
+ * i40e_assign_netdev_ops - Initialize netdev operations function pointers
+ * @dev: ptr to the netdev struct
+ **/
+#ifdef HAVE_CONFIG_HOTPLUG
+static void __devinit i40e_assign_netdev_ops(struct net_device *dev)
+#else
+static void i40e_assign_netdev_ops(struct net_device *dev)
+#endif
+{
+#ifdef HAVE_NET_DEVICE_OPS
+       dev->netdev_ops = &i40e_netdev_ops;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+       set_netdev_ops_ext(dev, &i40e_netdev_ops_ext);
+#endif
+#else /* HAVE_NET_DEVICE_OPS */
+       dev->open = i40e_open;
+       dev->stop = i40e_close;
+       dev->hard_start_xmit = i40e_lan_xmit_frame;
+       dev->get_stats = i40e_get_netdev_stats_struct;
+
+#ifdef HAVE_SET_RX_MODE
+       dev->set_rx_mode = i40e_set_rx_mode;
+#endif
+       dev->set_multicast_list = i40e_set_rx_mode;
+       dev->set_mac_address = i40e_set_mac;
+       dev->change_mtu = i40e_change_mtu;
+#if defined(HAVE_PTP_1588_CLOCK) || defined(HAVE_I40E_INTELCIM_IOCTL)
+       dev->do_ioctl = i40e_ioctl;
+#endif
+       dev->tx_timeout = i40e_tx_timeout;
+#ifdef NETIF_F_HW_VLAN_TX
+       dev->vlan_rx_register = i40e_vlan_rx_register;
+       dev->vlan_rx_add_vid = i40e_vlan_rx_add_vid;
+       dev->vlan_rx_kill_vid = i40e_vlan_rx_kill_vid;
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       dev->poll_controller = i40e_netpoll;
+#endif
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+       dev->select_queue = i40e_lan_select_queue;
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#endif /* HAVE_NET_DEVICE_OPS */
+}
+
+/**
+ * i40e_config_netdev - Setup the netdev flags
+ * @vsi: the VSI being configured
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_config_netdev(struct i40e_vsi *vsi)
+{
+       u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_netdev_priv *np;
+       struct net_device *netdev;
+       u8 mac_addr[ETH_ALEN];
+       int etherdev_size;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+       u32 hw_features;
+#endif
+
+       etherdev_size = sizeof(struct i40e_netdev_priv);
+       netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
+       if (!netdev)
+               return -ENOMEM;
+
+       vsi->netdev = netdev;
+       np = netdev_priv(netdev);
+       np->vsi = vsi;
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
+       netdev->hw_enc_features = NETIF_F_IP_CSUM        |
+                                 NETIF_F_SCTP_CSUM      |
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
+                                 NETIF_F_GSO_UDP_TUNNEL |
+                                 NETIF_F_TSO            |
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+                                 NETIF_F_SG;
+#endif /* HAVE_ENCAP_CSUM_OFFLOAD */
+
+       netdev->features = NETIF_F_SG                  |
+                          NETIF_F_IP_CSUM             |
+                          NETIF_F_SCTP_CSUM           |
+                          NETIF_F_HIGHDMA             |
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
+                          NETIF_F_GSO_UDP_TUNNEL      |
+#endif
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+                          NETIF_F_HW_VLAN_CTAG_TX     |
+                          NETIF_F_HW_VLAN_CTAG_RX     |
+                          NETIF_F_HW_VLAN_CTAG_FILTER |
+#else
+                          NETIF_F_HW_VLAN_TX          |
+                          NETIF_F_HW_VLAN_RX          |
+                          NETIF_F_HW_VLAN_FILTER      |
+#endif
+#ifdef NETIF_F_IPV6_CSUM
+                          NETIF_F_IPV6_CSUM           |
+#endif
+#ifdef NETIF_F_TSO
+                          NETIF_F_TSO                 |
+                          NETIF_F_TSO_ECN             |
+#ifdef NETIF_F_TSO6
+                          NETIF_F_TSO6                |
+#endif /* NETIF_F_TSO6 */
+#endif /* NETIF_F_TSO */
+#ifdef HAVE_NDO_SET_FEATURES
+                          NETIF_F_RXCSUM              |
+#endif
+#ifdef NETIF_F_RXHASH
+                          NETIF_F_RXHASH              |
+#endif /* NETIF_F_RXHASH */
+                          0;
+#if defined(HAVE_NDO_SET_FEATURES) || defined(ETHTOOL_GRXRINGS)
+
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               netdev->features |= NETIF_F_NTUPLE;
+#endif
+
+#ifdef HAVE_NDO_SET_FEATURES
+       /* copy netdev features into list of user selectable features */
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+       hw_features = get_netdev_hw_features(netdev);
+       hw_features |= netdev->features;
+       set_netdev_hw_features(netdev, hw_features);
+#else
+       netdev->hw_features |= netdev->features;
+#endif
+#else
+#ifdef NETIF_F_GRO
+       netdev->features |= NETIF_F_GRO;
+#endif /* NETIF_F_GRO */
+#endif /* HAVE_NDO_SET_FEATURES */
+
+       if (vsi->type == I40E_VSI_MAIN) {
+               SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+               ether_addr_copy(mac_addr, hw->mac.perm_addr);
+               /* The following steps are necessary to prevent reception
+                * of tagged packets - some older NVM configurations load a
+                * default MAC-VLAN filter that accepts any tagged packet,
+                * which must be replaced by a normal filter.
+                */
+               if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
+                       spin_lock_bh(&vsi->mac_filter_list_lock);
+                       i40e_add_filter(vsi, mac_addr,
+                                       I40E_VLAN_ANY, false, true);
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+               }
+       } else {
+               /* relate the VSI_VMDQ name to the VSI_MAIN name */
+               snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
+                        pf->vsi[pf->lan_vsi]->netdev->name);
+               random_ether_addr(mac_addr);
+
+               spin_lock_bh(&vsi->mac_filter_list_lock);
+               i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+       }
+
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+       i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       ether_addr_copy(netdev->dev_addr, mac_addr);
+#ifdef ETHTOOL_GPERMADDR
+       ether_addr_copy(netdev->perm_addr, mac_addr);
+#endif
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+       /* vlan gets same features (except vlan offload)
+        * after any tweaks for specific VSI types
+        */
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+       netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
+                                                    NETIF_F_HW_VLAN_CTAG_RX |
+                                                  NETIF_F_HW_VLAN_CTAG_FILTER);
+#else
+       netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_TX |
+                                                    NETIF_F_HW_VLAN_FILTER |
+                                                    NETIF_F_HW_VLAN_RX);
+#endif
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+#ifdef IFF_UNICAST_FLT
+       netdev->priv_flags |= IFF_UNICAST_FLT;
+#endif
+#ifdef IFF_SUPP_NOFCS
+       netdev->priv_flags |= IFF_SUPP_NOFCS;
+#endif
+       /* Setup netdev TC information */
+       i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
+
+       i40e_assign_netdev_ops(netdev);
+       netdev->watchdog_timeo = 5 * HZ;
+
+#ifdef SIOCETHTOOL
+       i40e_set_ethtool_ops(netdev);
+#endif
+#ifdef I40E_FCOE
+       i40e_fcoe_config_netdev(netdev, vsi);
+#endif
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_delete - Delete a VSI from the switch
+ * @vsi: the VSI being removed
+ **/
+static void i40e_vsi_delete(struct i40e_vsi *vsi)
+{
+       /* removing the default VSI is not allowed */
+       if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
+               return;
+
+       i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
+}
+
+/**
+ * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
+ * @vsi: the VSI being queried
+ *
+ * Returns 1 if the HW bridge is in VEB mode, 0 in case of VEPA mode,
+ * or -ENOENT if no VEB is associated with the uplink
+ **/
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
+{
+       struct i40e_veb *veb;
+       struct i40e_pf *pf = vsi->back;
+
+       /* Uplink is not a bridge so default to VEB */
+       if (vsi->veb_idx == I40E_NO_VEB)
+               return 1;
+
+       veb = pf->veb[vsi->veb_idx];
+       if (!veb) {
+               dev_info(&pf->pdev->dev,
+                        "There is no veb associated with the bridge\n");
+               return -ENOENT;
+       }
+
+#ifdef HAVE_BRIDGE_ATTRIBS
+       /* Uplink is a bridge in VEPA mode */
+       if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+               return 0;
+
+       /* Uplink is a bridge in VEB mode */
+       return 1;
+#else
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               return 1;
+
+       /* VEPA is now the default bridge mode, so return 0 */
+       return 0;
+#endif
+}
+
+/**
+ * i40e_add_vsi - Add a VSI to the switch
+ * @vsi: the VSI being configured
+ *
+ * This initializes a VSI context depending on the VSI type to be added and
+ * passes it down to the add_vsi aq command.
+ **/
+static int i40e_add_vsi(struct i40e_vsi *vsi)
+{
+       int ret = -ENODEV;
+       u8 laa_macaddr[ETH_ALEN];
+       bool found_laa_mac_filter = false;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_vsi_context ctxt;
+       struct i40e_mac_filter *f, *ftmp;
+
+       u8 enabled_tc = 0x1; /* TC0 enabled */
+       int f_count = 0;
+       u32 val;
+
+       memset(&ctxt, 0, sizeof(ctxt));
+       switch (vsi->type) {
+       case I40E_VSI_MAIN:
+               /* The PF's main VSI is already setup as part of the
+                * device initialization, so we'll not bother with
+                * the add_vsi call, but we will retrieve the current
+                * VSI context.
+                */
+               ctxt.seid = pf->main_vsi_seid;
+               ctxt.pf_num = pf->hw.pf_id;
+               ctxt.vf_num = 0;
+               ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+               ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "couldn't get PF vsi config, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
+                       return -ENOENT;
+               }
+               vsi->info = ctxt.info;
+               vsi->info.valid_sections = 0;
+
+               vsi->seid = ctxt.seid;
+               vsi->id = ctxt.vsi_number;
+
+               enabled_tc = i40e_pf_get_tc_map(pf);
+
+               /* MFP mode setup queue map and update VSI */
+               if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
+                    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
+                       memset(&ctxt, 0, sizeof(ctxt));
+                       ctxt.seid = pf->main_vsi_seid;
+                       ctxt.pf_num = pf->hw.pf_id;
+                       ctxt.vf_num = 0;
+                       i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+                       ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "update vsi failed, err %s aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
+                               ret = -ENOENT;
+                               goto err;
+                       }
+                       /* update the local VSI info queue map */
+                       i40e_vsi_update_queue_map(vsi, &ctxt);
+                       vsi->info.valid_sections = 0;
+               } else {
+                       /* Default/Main VSI is only enabled for TC0
+                        * reconfigure it to enable all TCs that are
+                        * available on the port in SFP mode.
+                        * For MFP case the iSCSI PF would use this
+                        * flow to enable LAN+iSCSI TC.
+                        */
+                       ret = i40e_vsi_config_tc(vsi, enabled_tc);
+                       if (ret) {
+                               dev_info(&pf->pdev->dev,
+                                        "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+                                        enabled_tc,
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
+                               ret = -ENOENT;
+                       }
+               }
+               break;
+
+       case I40E_VSI_FDIR:
+               ctxt.pf_num = hw->pf_id;
+               ctxt.vf_num = 0;
+               ctxt.uplink_seid = vsi->uplink_seid;
+               ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+               ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+               if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+                   (i40e_is_vsi_uplink_mode_veb(vsi))) {
+                       ctxt.info.valid_sections |=
+                            cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+                       ctxt.info.switch_id =
+                          cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+               }
+               i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+               break;
+
+       case I40E_VSI_VMDQ2:
+               ctxt.pf_num = hw->pf_id;
+               ctxt.vf_num = 0;
+               ctxt.uplink_seid = vsi->uplink_seid;
+               ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+               ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+               /* This VSI is connected to VEB so the switch_id
+                * should be set to zero by default.
+                */
+               if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+                       ctxt.info.valid_sections |=
+                               cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+                       ctxt.info.switch_id =
+                               cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+               }
+
+               /* Setup the VSI tx/rx queue map for TC0 only for now */
+               i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+               break;
+
+       case I40E_VSI_SRIOV:
+               ctxt.pf_num = hw->pf_id;
+               ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+               ctxt.uplink_seid = vsi->uplink_seid;
+               ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+               ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+
+               /* This VSI is connected to VEB so the switch_id
+                * should be set to zero by default.
+                */
+               if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+                       ctxt.info.valid_sections |=
+                               cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+                       ctxt.info.switch_id =
+                               cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+               }
+
+               ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+               ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+               if (pf->vf[vsi->vf_id].spoofchk) {
+                       ctxt.info.valid_sections |=
+                               cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+                       ctxt.info.sec_flags |=
+                               (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+                                I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+               }
+#endif
+               /* Setup the VSI tx/rx queue map for TC0 only for now */
+               i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+               break;
+
+#ifdef I40E_FCOE
+       case I40E_VSI_FCOE:
+               ret = i40e_fcoe_vsi_init(vsi, &ctxt);
+               if (ret) {
+                       dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
+                       return ret;
+               }
+               break;
+
+#endif /* I40E_FCOE */
+       default:
+               return -ENODEV;
+       }
+
+       if (vsi->type != I40E_VSI_MAIN) {
+               ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+               if (ret) {
+                       dev_info(&vsi->back->pdev->dev,
+                                "add vsi failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
+                       ret = -ENOENT;
+                       goto err;
+               }
+               vsi->info = ctxt.info;
+               vsi->info.valid_sections = 0;
+               vsi->seid = ctxt.seid;
+               vsi->id = ctxt.vsi_number;
+               val = rd32(&pf->hw, 0x208800 + (4*(vsi->id)));
+               if (!(val & 0x1)) /* MACVSIPRUNEENABLE = 1 */
+                       dev_warn(&vsi->back->pdev->dev,
+                                "Note: VSI source pruning is not being set correctly by FW\n");
+       }
+
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+       /* If macvlan filters already exist, force them to get loaded */
+       list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+               f->changed = true;
+               f_count++;
+
+               /* Expected to have only one MAC filter entry for LAA in list */
+               if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
+                       ether_addr_copy(laa_macaddr, f->macaddr);
+                       found_laa_mac_filter = true;
+               }
+       }
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       if (found_laa_mac_filter) {
+               struct i40e_aqc_remove_macvlan_element_data element;
+
+               memset(&element, 0, sizeof(element));
+               ether_addr_copy(element.mac_addr, laa_macaddr);
+               element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+               ret = i40e_aq_remove_macvlan(hw, vsi->seid,
+                                            &element, 1, NULL);
+               if (ret) {
+                       /* some older FW has a different default */
+                       element.flags |=
+                                      I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+                       i40e_aq_remove_macvlan(hw, vsi->seid,
+                                              &element, 1, NULL);
+               }
+
+               i40e_aq_mac_address_write(hw,
+                                         I40E_AQC_WRITE_TYPE_LAA_WOL,
+                                         laa_macaddr, NULL);
+       }
+
+       if (f_count) {
+               vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+               pf->flags |= I40E_FLAG_FILTER_SYNC;
+       }
+
+       /* Update VSI BW information */
+       ret = i40e_vsi_get_bw_info(vsi);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "couldn't get vsi bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               /* VSI is already added so not tearing that up */
+               ret = 0;
+       }
+
+err:
+       return ret;
+}
+
+/**
+ * i40e_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ **/
+int i40e_vsi_release(struct i40e_vsi *vsi)
+{
+       struct i40e_mac_filter *f, *ftmp;
+       struct i40e_veb *veb = NULL;
+       struct i40e_pf *pf;
+       u16 uplink_seid;
+       int i, n;
+
+       pf = vsi->back;
+
+       /* release of a VEB-owner or last VSI is not allowed */
+       if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
+               dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
+                        vsi->seid, vsi->uplink_seid);
+               return -ENODEV;
+       }
+       if (vsi == pf->vsi[pf->lan_vsi] &&
+           !test_bit(__I40E_DOWN, &pf->state)) {
+               dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
+               return -ENODEV;
+       }
+
+       uplink_seid = vsi->uplink_seid;
+       if (vsi->type != I40E_VSI_SRIOV) {
+               if (vsi->netdev_registered) {
+                       vsi->netdev_registered = false;
+                       if (vsi->netdev) {
+                               /* results in a call to i40e_close() */
+                               unregister_netdev(vsi->netdev);
+                       }
+               } else {
+                       i40e_vsi_close(vsi);
+               }
+               i40e_vsi_disable_irq(vsi);
+       }
+
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+       list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
+               i40e_del_filter(vsi, f->macaddr, f->vlan,
+                               f->is_vf, f->is_netdev);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       i40e_sync_vsi_filters(vsi, false);
+
+       i40e_vsi_delete(vsi);
+       i40e_vsi_free_q_vectors(vsi);
+       if (vsi->netdev) {
+               free_netdev(vsi->netdev);
+               vsi->netdev = NULL;
+       }
+       i40e_vsi_clear_rings(vsi);
+       i40e_vsi_clear(vsi);
+
+       /* If this was the last thing on the VEB, except for the
+        * controlling VSI, remove the VEB, which puts the controlling
+        * VSI onto the next level down in the switch.
+        *
+        * Well, okay, there's one more exception here: don't remove
+        * the orphan VEBs yet.  We'll wait for an explicit remove request
+        * from up the network stack.
+        */
+       for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
+               if (pf->vsi[i] &&
+                   pf->vsi[i]->uplink_seid == uplink_seid &&
+                   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+                       n++;      /* count the VSIs */
+               }
+       }
+       for (i = 0; i < I40E_MAX_VEB; i++) {
+               if (!pf->veb[i])
+                       continue;
+               if (pf->veb[i]->uplink_seid == uplink_seid)
+                       n++;     /* count the VEBs */
+               if (pf->veb[i]->seid == uplink_seid)
+                       veb = pf->veb[i];
+       }
+       if (n == 0 && veb && veb->uplink_seid != 0)
+               i40e_veb_release(veb);
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after i40e_vsi_mem_alloc() which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ **/
+static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
+{
+       int ret = -ENOENT;
+       struct i40e_pf *pf = vsi->back;
+
+       if (vsi->q_vectors[0]) {
+               dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+                        vsi->seid);
+               return -EEXIST;
+       }
+
+       if (vsi->base_vector) {
+               dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
+                        vsi->seid, vsi->base_vector);
+               return -EEXIST;
+       }
+
+       ret = i40e_vsi_alloc_q_vectors(vsi);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "failed to allocate %d q_vector for VSI %d, ret=%d\n",
+                        vsi->num_q_vectors, vsi->seid, ret);
+               vsi->num_q_vectors = 0;
+               goto vector_setup_out;
+       }
+
+#if !defined(I40E_LEGACY_INTERRUPT) && !defined(I40E_MSI_INTERRUPT)
+       /* In Legacy mode, we do not have to get any other vector since we
+        * piggyback on the misc/ICR0 for queue interrupts.
+        */
+       if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+               return ret;
+       /* Use a signed local for the lump index so a negative return from
+        * i40e_get_lump() is caught even though base_vector itself may be
+        * an unsigned field; assigning into it directly made the < 0 test
+        * unreachable.
+        */
+       if (vsi->num_q_vectors) {
+               ret = i40e_get_lump(pf, pf->irq_pile,
+                                   vsi->num_q_vectors, vsi->idx);
+               if (ret < 0) {
+                       dev_info(&pf->pdev->dev,
+                                "failed to get tracking for %d vectors for VSI %d, err=%d\n",
+                                vsi->num_q_vectors, vsi->seid, ret);
+                       i40e_vsi_free_q_vectors(vsi);
+                       ret = -ENOENT;
+                       goto vector_setup_out;
+               }
+               vsi->base_vector = ret;
+               ret = 0;
+       }
+
+#endif
+vector_setup_out:
+       return ret;
+}
+
+/**
+ * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
+ * @vsi: pointer to the VSI.
+ *
+ * This re-allocates a VSI's queue resources.
+ *
+ * Returns pointer to the successfully reallocated and configured VSI sw
+ * struct on success, otherwise returns NULL on failure.
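+ *
+ * The only expected caller is i40e_setup_pf_switch() below, e.g.:
+ *     vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);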
+ **/
+static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       u8 enabled_tc;
+       int ret;
+
+       i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+       i40e_vsi_clear_rings(vsi);
+
+       i40e_vsi_free_arrays(vsi, false);
+       i40e_set_num_rings_in_vsi(vsi);
+       ret = i40e_vsi_alloc_arrays(vsi, false);
+       if (ret)
+               goto err_vsi;
+
+       ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+       if (ret < 0) {
+               dev_info(&pf->pdev->dev,
+                        "failed to get tracking for %d queues for VSI %d err %d\n",
+                        vsi->alloc_queue_pairs, vsi->seid, ret);
+               goto err_vsi;
+       }
+       vsi->base_queue = ret;
+
+       /* Update the FW view of the VSI. Force a reset of TC and queue
+        * layout configurations.
+        */
+       enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+       pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+       pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+       i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+
+       /* assign it some queues */
+       ret = i40e_alloc_rings(vsi);
+       if (ret)
+               goto err_rings;
+
+       /* map all of the rings to the q_vectors */
+       i40e_vsi_map_rings_to_vectors(vsi);
+       return vsi;
+
+err_rings:
+       i40e_vsi_free_q_vectors(vsi);
+       if (vsi->netdev_registered) {
+               vsi->netdev_registered = false;
+               unregister_netdev(vsi->netdev);
+               free_netdev(vsi->netdev);
+               vsi->netdev = NULL;
+       }
+       i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+       i40e_vsi_clear(vsi);
+       return NULL;
+}
+
+/**
+ * i40e_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @type: VSI type
+ * @uplink_seid: the switch element to link to
+ * @param1: usage depends upon VSI type. For VF types, indicates VF id
+ *
+ * This allocates the sw VSI structure and its queue resources, then adds the
+ * VSI to the identified VEB.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, otherwise returns NULL on failure.
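+ *
+ * Illustrative calls: the main LAN VSI comes from
+ *     i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ * (see i40e_setup_pf_switch() below); an SR-IOV caller would instead pass
+ * the VF id in param1, e.g. i40e_vsi_setup(pf, I40E_VSI_SRIOV, seid, vf_id).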
+ **/
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+                               u16 uplink_seid, u32 param1)
+{
+       struct i40e_vsi *vsi = NULL;
+       struct i40e_veb *veb = NULL;
+       int ret, i;
+       int v_idx;
+
+       /* The requested uplink_seid must be either
+        *     - the PF's port seid
+        *              no VEB is needed because this is the PF
+        *              or this is a Flow Director special case VSI
+        *     - seid of an existing VEB
+        *     - seid of a VSI that owns an existing VEB
+        *     - seid of a VSI that doesn't own a VEB
+        *              a new VEB is created and the VSI becomes the owner
+        *     - seid of the PF VSI, which is what creates the first VEB
+        *              this is a special case of the previous
+        *
+        * Find which uplink_seid we were given and create a new VEB if needed
+        */
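+       /* A new VEB is only created below when the given seid resolves to a
+        * VSI rather than an existing VEB or the PF's port.
+        */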
+       for (i = 0; i < I40E_MAX_VEB; i++) {
+               if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
+                       veb = pf->veb[i];
+                       break;
+               }
+       }
+
+       if (!veb && uplink_seid != pf->mac_seid) {
+               for (i = 0; i < pf->num_alloc_vsi; i++) {
+                       if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
+                               vsi = pf->vsi[i];
+                               break;
+                       }
+               }
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
+                                uplink_seid);
+                       return NULL;
+               }
+
+               if (vsi->uplink_seid == pf->mac_seid)
+                       veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
+                                            vsi->tc_config.enabled_tc);
+               else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+                       veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+                                            vsi->tc_config.enabled_tc);
+               if (veb) {
+                       if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
+                               dev_info(&vsi->back->pdev->dev,
+                                        "New VSI creation error, uplink seid of LAN VSI expected.\n");
+                               return NULL;
+                       }
+#ifdef HAVE_BRIDGE_ATTRIBS
+                       /* We come up by default in VEPA mode unless SR-IOV is
+                        * already enabled, in which case we can't force VEPA
+                        * mode.
+                        */
+                       if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                               veb->bridge_mode = BRIDGE_MODE_VEPA;
+                               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+                       }
+#endif
+                       i40e_config_bridge_mode(veb);
+               }
+               for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+                       if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+                               veb = pf->veb[i];
+               }
+               if (!veb) {
+                       dev_info(&pf->pdev->dev, "couldn't add VEB\n");
+                       return NULL;
+               }
+
+               vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+               uplink_seid = veb->seid;
+       }
+
+       /* get vsi sw struct */
+       v_idx = i40e_vsi_mem_alloc(pf, type);
+       if (v_idx < 0)
+               goto err_alloc;
+       vsi = pf->vsi[v_idx];
+       if (!vsi)
+               goto err_alloc;
+       vsi->type = type;
+       vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
+
+       if (type == I40E_VSI_MAIN)
+               pf->lan_vsi = v_idx;
+       else if (type == I40E_VSI_SRIOV)
+               vsi->vf_id = param1;
+       /* assign it some queues */
+       ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
+                               vsi->idx);
+       if (ret < 0) {
+               dev_info(&pf->pdev->dev,
+                        "failed to get tracking for %d queues for VSI %d err=%d\n",
+                        vsi->alloc_queue_pairs, vsi->seid, ret);
+               goto err_vsi;
+       }
+       vsi->base_queue = ret;
+
+       /* get a VSI from the hardware */
+       vsi->uplink_seid = uplink_seid;
+       ret = i40e_add_vsi(vsi);
+       if (ret)
+               goto err_vsi;
+
+       switch (vsi->type) {
+       /* setup the netdev if needed */
+       case I40E_VSI_MAIN:
+       case I40E_VSI_VMDQ2:
+#ifdef I40E_FCOE
+       case I40E_VSI_FCOE:
+#endif
+               ret = i40e_config_netdev(vsi);
+               if (ret)
+                       goto err_netdev;
+               ret = register_netdev(vsi->netdev);
+               if (ret)
+                       goto err_netdev;
+               vsi->netdev_registered = true;
+               netif_carrier_off(vsi->netdev);
+               /* make sure transmit queues start off as stopped */
+               netif_tx_stop_all_queues(vsi->netdev);
+#ifdef CONFIG_DCB
+#ifdef HAVE_DCBNL_IEEE
+               /* Setup DCB netlink interface */
+               i40e_dcbnl_setup(vsi);
+#endif /* HAVE_DCBNL_IEEE */
+#endif /* CONFIG_DCB */
+               /* fall through */
+
+       case I40E_VSI_FDIR:
+               /* set up vectors and rings if needed */
+               ret = i40e_vsi_setup_vectors(vsi);
+               if (ret)
+                       goto err_msix;
+
+               ret = i40e_alloc_rings(vsi);
+               if (ret)
+                       goto err_rings;
+
+               /* map all of the rings to the q_vectors */
+               i40e_vsi_map_rings_to_vectors(vsi);
+
+               i40e_vsi_reset_stats(vsi);
+               break;
+
+       default:
+               /* no netdev or rings for the other VSI types */
+               break;
+       }
+       return vsi;
+
+err_rings:
+       i40e_vsi_free_q_vectors(vsi);
+err_msix:
+       if (vsi->netdev_registered) {
+               vsi->netdev_registered = false;
+               unregister_netdev(vsi->netdev);
+               free_netdev(vsi->netdev);
+               vsi->netdev = NULL;
+       }
+err_netdev:
+       i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+       i40e_vsi_clear(vsi);
+err_alloc:
+       return NULL;
+}
+
+/**
+ * i40e_veb_get_bw_info - Query VEB BW information
+ * @veb: the veb to query
+ *
+ * Query the Tx scheduler BW configuration data for given VEB
+ **/
+static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+{
+       struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
+       struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
+       struct i40e_pf *pf = veb->pf;
+       struct i40e_hw *hw = &pf->hw;
+       u32 tc_bw_max;
+       int ret = 0;
+       int i;
+
+       ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
+                                                 &bw_data, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "query veb bw config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+               goto out;
+       }
+
+       ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
+                                                  &ets_data, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "query veb bw ets config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+               goto out;
+       }
+
+       veb->bw_limit = LE16_TO_CPU(ets_data.port_bw_limit);
+       veb->bw_max_quanta = ets_data.tc_bw_max;
+       veb->is_abs_credits = bw_data.absolute_credits_enable;
+       veb->enabled_tc = ets_data.tc_valid_bits;
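+       /* tc_bw_max arrives as two LE 16-bit words; stitch them into one
+        * 32-bit value carrying a 4-bit max-quanta field per traffic class
+        * (only the low 3 bits of each field are kept below).
+        */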
+       tc_bw_max = LE16_TO_CPU(bw_data.tc_bw_max[0]) |
+                   (LE16_TO_CPU(bw_data.tc_bw_max[1]) << 16);
+       for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+               veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
+               veb->bw_tc_limit_credits[i] =
+                                       LE16_TO_CPU(bw_data.tc_bw_limits[i]);
+               veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
+       }
+
+out:
+       return ret;
+}
+
+/**
+ * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
+ * @pf: board private structure
+ *
+ * On error: returns error code (negative)
+ * On success: returns VEB index in PF (non-negative)
+ **/
+static int i40e_veb_mem_alloc(struct i40e_pf *pf)
+{
+       int ret = -ENOENT;
+       struct i40e_veb *veb;
+       int i;
+
+       /* Need to protect the allocation of switch elements at the PF level */
+       mutex_lock(&pf->switch_mutex);
+
+       /* VEB list may be fragmented if VEB creation/destruction has
+        * been happening.  We can afford to do a quick scan to look
+        * for any free slots in the list.
+        *
+        * find the next empty veb slot
+        */
+       i = 0;
+       while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
+               i++;
+       if (i >= I40E_MAX_VEB) {
+               ret = -ENOMEM;
+               goto err_alloc_veb;  /* out of VEB slots! */
+       }
+
+       veb = kzalloc(sizeof(*veb), GFP_KERNEL);
+       if (!veb) {
+               ret = -ENOMEM;
+               goto err_alloc_veb;
+       }
+       veb->pf = pf;
+       veb->idx = i;
+       veb->enabled_tc = 1;
+
+       pf->veb[i] = veb;
+       ret = i;
+err_alloc_veb:
+       mutex_unlock(&pf->switch_mutex);
+       return ret;
+}
+
+/**
+ * i40e_switch_branch_release - Delete a branch of the switch tree
+ * @branch: where to start deleting
+ *
+ * This uses recursion to find the tips of the branch to be
+ * removed, deleting until we get back to and can delete this VEB.
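+ *
+ * For example (seids hypothetical): releasing VEB 100 with VEB 200 below
+ * it first removes VEB 200's VSIs and then VEB 200, then VEB 100's own
+ * non-owner VSIs, and finally VEB 100 itself.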
+ **/
+static void i40e_switch_branch_release(struct i40e_veb *branch)
+{
+       struct i40e_pf *pf = branch->pf;
+       u16 branch_seid = branch->seid;
+       u16 veb_idx = branch->idx;
+       int i;
+
+       /* release any VEBs on this VEB - RECURSION */
+       for (i = 0; i < I40E_MAX_VEB; i++) {
+               if (!pf->veb[i])
+                       continue;
+               if (pf->veb[i]->uplink_seid == branch->seid)
+                       i40e_switch_branch_release(pf->veb[i]);
+       }
+
+       /* Release the VSIs on this VEB, but not the owner VSI.
+        *
+        * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
+        *       the VEB itself, so don't use (*branch) after this loop.
+        */
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               if (!pf->vsi[i])
+                       continue;
+               if (pf->vsi[i]->uplink_seid == branch_seid &&
+                  (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+                       i40e_vsi_release(pf->vsi[i]);
+               }
+       }
+
+       /* There's one corner case where the VEB might not have been
+        * removed, so double check it here and remove it if needed.
+        * This case happens if the veb was created from the debugfs
+        * commands and no VSIs were added to it.
+        */
+       if (pf->veb[veb_idx])
+               i40e_veb_release(pf->veb[veb_idx]);
+}
+
+/**
+ * i40e_veb_clear - remove veb struct
+ * @veb: the veb to remove
+ **/
+static void i40e_veb_clear(struct i40e_veb *veb)
+{
+       if (!veb)
+               return;
+
+       if (veb->pf) {
+               struct i40e_pf *pf = veb->pf;
+
+               mutex_lock(&pf->switch_mutex);
+               if (pf->veb[veb->idx] == veb)
+                       pf->veb[veb->idx] = NULL;
+               mutex_unlock(&pf->switch_mutex);
+       }
+
+       kfree(veb);
+}
+
+/**
+ * i40e_veb_release - Delete a VEB and free its resources
+ * @veb: the VEB being removed
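+ *
+ * A VEB is only removed once exactly one VSI is left on it; that VSI is
+ * re-parented to the VEB's uplink (or, for a floating VEB, to the LAN
+ * VSI's uplink) before the element is deleted from the switch.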
+ **/
+void i40e_veb_release(struct i40e_veb *veb)
+{
+       struct i40e_vsi *vsi = NULL;
+       struct i40e_pf *pf;
+       int i, n = 0;
+
+       pf = veb->pf;
+
+       /* find the remaining VSI and check for extras */
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+                       n++;
+                       vsi = pf->vsi[i];
+               }
+       }
+       if (n != 1) {
+               dev_info(&pf->pdev->dev,
+                        "can't remove VEB %d with %d VSIs left\n",
+                        veb->seid, n);
+               return;
+       }
+
+       /* move the remaining VSI to uplink veb */
+       vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+       if (veb->uplink_seid) {
+               vsi->uplink_seid = veb->uplink_seid;
+               if (veb->uplink_seid == pf->mac_seid)
+                       vsi->veb_idx = I40E_NO_VEB;
+               else
+                       vsi->veb_idx = veb->veb_idx;
+       } else {
+               /* floating VEB */
+               vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+               vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+       }
+
+       i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+       i40e_veb_clear(veb);
+}
+
+/**
+ * i40e_add_veb - create the VEB in the switch
+ * @veb: the VEB to be instantiated
+ * @vsi: the controlling VSI
+ **/
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = veb->pf;
+       bool is_default = pf->cur_promisc;
+       bool is_cloud = false;
+       int ret;
+
+       /* get a VEB from the hardware */
+       ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
+                             veb->enabled_tc, is_default,
+                             is_cloud, &veb->seid, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "couldn't add VEB, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               return -EPERM;
+       }
+
+       /* get statistics counter */
+       ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
+                                        &veb->stats_idx, NULL, NULL, NULL);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "couldn't get VEB statistics idx, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               return -EPERM;
+       }
+       ret = i40e_veb_get_bw_info(veb);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "couldn't get VEB bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+               return -ENOENT;
+       }
+
+       vsi->uplink_seid = veb->seid;
+       vsi->veb_idx = veb->idx;
+       vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+
+       return 0;
+}
+
+/**
+ * i40e_veb_setup - Set up a VEB
+ * @pf: board private structure
+ * @flags: VEB setup flags
+ * @uplink_seid: the switch element to link to
+ * @vsi_seid: the initial VSI seid
+ * @enabled_tc: Enabled TC bit-map
+ *
+ * This allocates the sw VEB structure and links it into the switch.
+ * It is possible and legal for this to be a duplicate of an already
+ * existing VEB.  It is also possible for both uplink and vsi seids
+ * to be zero, in order to create a floating VEB.
+ *
+ * Returns pointer to the successfully allocated VEB sw struct on
+ * success, otherwise returns NULL on failure.
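+ *
+ * Illustrative: i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, 0) creates a
+ * VEB under uplink_seid with enabled_tc defaulting to TC0 only, while
+ * i40e_veb_setup(pf, 0, 0, 0, 0) would request a floating VEB.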
+ **/
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
+                               u16 uplink_seid, u16 vsi_seid,
+                               u8 enabled_tc)
+{
+       struct i40e_veb *veb, *uplink_veb = NULL;
+       int vsi_idx, veb_idx;
+       int ret;
+
+       /* if one seid is 0, the other must be 0 to create a floating relay */
+       if ((uplink_seid == 0 || vsi_seid == 0) &&
+           (uplink_seid + vsi_seid != 0)) {
+               dev_info(&pf->pdev->dev,
+                        "either both or neither seid must be 0: uplink=%d vsi=%d\n",
+                        uplink_seid, vsi_seid);
+               return NULL;
+       }
+
+       /* make sure there is such a vsi and uplink */
+       for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
+               if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
+                       break;
+       if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
+               dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
+                        vsi_seid);
+               return NULL;
+       }
+
+       if (uplink_seid && uplink_seid != pf->mac_seid) {
+               for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+                       if (pf->veb[veb_idx] &&
+                           pf->veb[veb_idx]->seid == uplink_seid) {
+                               uplink_veb = pf->veb[veb_idx];
+                               break;
+                       }
+               }
+               if (!uplink_veb) {
+                       dev_info(&pf->pdev->dev,
+                                "uplink seid %d not found\n", uplink_seid);
+                       return NULL;
+               }
+       }
+
+       /* get veb sw struct */
+       veb_idx = i40e_veb_mem_alloc(pf);
+       if (veb_idx < 0)
+               goto err_alloc;
+       veb = pf->veb[veb_idx];
+       veb->flags = flags;
+       veb->uplink_seid = uplink_seid;
+       veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
+       veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
+
+       /* create the VEB in the switch */
+       ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+       if (ret)
+               goto err_veb;
+       if (vsi_idx == pf->lan_vsi)
+               pf->lan_veb = veb->idx;
+
+       return veb;
+
+err_veb:
+       i40e_veb_clear(veb);
+err_alloc:
+       return NULL;
+}
+
+/**
+ * i40e_setup_pf_switch_element - set PF vars based on switch type
+ * @pf: board private structure
+ * @ele: element we are building info from
+ * @num_reported: total number of elements
+ * @printconfig: should we print the contents
+ *
+ * helper function to assist in extracting a few useful SEID values.
+ **/
+static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
+                               struct i40e_aqc_switch_config_element_resp *ele,
+                               u16 num_reported, bool printconfig)
+{
+       u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
+       u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
+       u8 element_type = ele->element_type;
+       u16 seid = le16_to_cpu(ele->seid);
+
+       if (printconfig)
+               dev_info(&pf->pdev->dev,
+                        "type=%d seid=%d uplink=%d downlink=%d\n",
+                        element_type, seid, uplink_seid, downlink_seid);
+
+       switch (element_type) {
+       case I40E_SWITCH_ELEMENT_TYPE_MAC:
+               pf->mac_seid = seid;
+               break;
+       case I40E_SWITCH_ELEMENT_TYPE_VEB:
+               /* Main VEB? */
+               if (uplink_seid != pf->mac_seid)
+                       break;
+               if (pf->lan_veb == I40E_NO_VEB) {
+                       int v;
+
+                       /* find existing or else empty VEB */
+                       for (v = 0; v < I40E_MAX_VEB; v++) {
+                               if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
+                                       pf->lan_veb = v;
+                                       break;
+                               }
+                       }
+                       if (pf->lan_veb == I40E_NO_VEB) {
+                               v = i40e_veb_mem_alloc(pf);
+                               if (v < 0)
+                                       break;
+                               pf->lan_veb = v;
+                       }
+               }
+
+               pf->veb[pf->lan_veb]->seid = seid;
+               pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
+               pf->veb[pf->lan_veb]->pf = pf;
+               pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
+               break;
+       case I40E_SWITCH_ELEMENT_TYPE_VSI:
+               if (num_reported != 1)
+                       break;
+               /* This is immediately after a reset so we can assume this is
+                * the PF's VSI
+                */
+               pf->mac_seid = uplink_seid;
+               pf->pf_seid = downlink_seid;
+               pf->main_vsi_seid = seid;
+               if (printconfig)
+                       dev_info(&pf->pdev->dev,
+                                "pf_seid=%d main_vsi_seid=%d\n",
+                                pf->pf_seid, pf->main_vsi_seid);
+               break;
+       case I40E_SWITCH_ELEMENT_TYPE_PF:
+       case I40E_SWITCH_ELEMENT_TYPE_VF:
+       case I40E_SWITCH_ELEMENT_TYPE_EMP:
+       case I40E_SWITCH_ELEMENT_TYPE_BMC:
+       case I40E_SWITCH_ELEMENT_TYPE_PE:
+       case I40E_SWITCH_ELEMENT_TYPE_PA:
+               /* ignore these for now */
+               break;
+       default:
+               dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
+                        element_type, seid);
+               break;
+       }
+}
+
+/**
+ * i40e_fetch_switch_configuration - Get switch config from firmware
+ * @pf: board private structure
+ * @printconfig: should we print the contents
+ *
+ * Get the current switch configuration from the device and
+ * extract a few useful SEID values.
+ **/
+int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
+{
+       struct i40e_aqc_get_switch_config_resp *sw_config;
+       u16 next_seid = 0;
+       int ret = 0;
+       u8 *aq_buf;
+       int i;
+
+       aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
+       if (!aq_buf)
+               return -ENOMEM;
+
+       sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
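+       /* The switch config can span several AQ responses; next_seid acts
+        * as the firmware's cursor, so keep fetching until it returns 0.
+        */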
+       do {
+               u16 num_reported, num_total;
+
+               ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
+                                               I40E_AQ_LARGE_BUF,
+                                               &next_seid, NULL);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "get switch config failed err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
+                       kfree(aq_buf);
+                       return -ENOENT;
+               }
+
+               num_reported = le16_to_cpu(sw_config->header.num_reported);
+               num_total = le16_to_cpu(sw_config->header.num_total);
+
+               if (printconfig)
+                       dev_info(&pf->pdev->dev,
+                                "header: %d reported %d total\n",
+                                num_reported, num_total);
+
+               for (i = 0; i < num_reported; i++) {
+                       struct i40e_aqc_switch_config_element_resp *ele =
+                               &sw_config->element[i];
+
+                       i40e_setup_pf_switch_element(pf, ele, num_reported,
+                                                    printconfig);
+               }
+       } while (next_seid != 0);
+
+       kfree(aq_buf);
+       return ret;
+}
+
+/**
+ * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
+{
+       int ret;
+
+       /* find out what's out there already */
+       ret = i40e_fetch_switch_configuration(pf, false);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "couldn't fetch switch config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               return ret;
+       }
+       i40e_pf_reset_stats(pf);
+
+       /* first time setup */
+       if (pf->lan_vsi == I40E_NO_VSI || reinit) {
+               struct i40e_vsi *vsi = NULL;
+               u16 uplink_seid;
+
+               /* Set up the PF VSI associated with the PF's main VSI
+                * that is already in the HW switch
+                */
+               if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+                       uplink_seid = pf->veb[pf->lan_veb]->seid;
+               else
+                       uplink_seid = pf->mac_seid;
+               if (pf->lan_vsi == I40E_NO_VSI)
+                       vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+               else if (reinit)
+                       vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
+               if (!vsi) {
+                       dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
+                       i40e_fdir_teardown(pf);
+                       return -EAGAIN;
+               }
+       } else {
+               /* force a reset of TC and queue layout configurations */
+               u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+
+               pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+               pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+               i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+       }
+       i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+
+       i40e_fdir_sb_setup(pf);
+
+       /* Setup static PF queue filter control settings */
+       ret = i40e_setup_pf_filter_control(pf);
+       if (ret) {
+               dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
+                        ret);
+               /* Failure here should not stop continuing other steps */
+       }
+
+       /* enable RSS in the HW, even for only one queue, as the stack can use
+        * the hash
+        */
+       if ((pf->flags & I40E_FLAG_RSS_ENABLED))
+               i40e_config_rss(pf);
+
+       /* fill in link information and enable LSE reporting */
+       i40e_update_link_info(&pf->hw);
+       i40e_link_event(pf);
+
+       /* Initialize user-specific link properties */
+       pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
+                                 I40E_AQ_AN_COMPLETED) ? true : false);
+
+#ifdef HAVE_PTP_1588_CLOCK
+       i40e_ptp_init(pf);
+
+#endif /* HAVE_PTP_1588_CLOCK */
+       return ret;
+}
+
+/**
+ * i40e_determine_queue_usage - Work out queue distribution
+ * @pf: board private structure
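+ *
+ * Illustrative split (numbers hypothetical): with 128 queue pairs, MSI-X,
+ * and only RSS enabled, the LAN VSI gets max(rss_size_max, online CPUs)
+ * pairs (capped at the HW count), one pair is held back if FD sideband is
+ * on, and the remainder is shared out to VF and VMDq VSIs.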
+ **/
+static void i40e_determine_queue_usage(struct i40e_pf *pf)
+{
+       int queues_left;
+
+       pf->num_lan_qps = 0;
+#ifdef I40E_FCOE
+       pf->num_fcoe_qps = 0;
+#endif
+
+       /* Find the max queues to be put into basic use.  We'll always be
+        * using TC0, whether or not DCB is running, and TC0 will get the
+        * big RSS set.
+        */
+       queues_left = pf->hw.func_caps.num_tx_qp;
+
+       if ((queues_left == 1) ||
+           !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+               /* one qp for PF, no queues for anything else */
+               queues_left = 0;
+               pf->rss_size = pf->num_lan_qps = 1;
+
+               /* make sure all the fancies are disabled */
+               pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
+#ifdef I40E_FCOE
+                              I40E_FLAG_FCOE_ENABLED   |
+#endif
+                              I40E_FLAG_FD_SB_ENABLED  |
+                              I40E_FLAG_FD_ATR_ENABLED |
+                              I40E_FLAG_DCB_CAPABLE    |
+                              I40E_FLAG_SRIOV_ENABLED  |
+                              I40E_FLAG_VMDQ_ENABLED);
+       } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
+                                 I40E_FLAG_FD_SB_ENABLED |
+                                 I40E_FLAG_FD_ATR_ENABLED |
+                                 I40E_FLAG_DCB_CAPABLE))) {
+               /* one qp for PF */
+               pf->rss_size = pf->num_lan_qps = 1;
+               queues_left -= pf->num_lan_qps;
+
+               pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
+#ifdef I40E_FCOE
+                              I40E_FLAG_FCOE_ENABLED   |
+#endif
+                              I40E_FLAG_FD_SB_ENABLED  |
+                              I40E_FLAG_FD_ATR_ENABLED |
+                              I40E_FLAG_DCB_ENABLED    |
+                              I40E_FLAG_VMDQ_ENABLED);
+       } else {
+               /* Not enough queues for all TCs */
+               if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
+                   (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
+                       pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+                       dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
+               }
+               pf->num_lan_qps = max_t(int, pf->rss_size_max,
+                                       num_online_cpus());
+               pf->num_lan_qps = min_t(int, pf->num_lan_qps,
+                                       pf->hw.func_caps.num_tx_qp);
+
+               queues_left -= pf->num_lan_qps;
+       }
+
+#ifdef I40E_FCOE
+       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+               if (I40E_DEFAULT_FCOE <= queues_left) {
+                       pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
+               } else if (I40E_MINIMUM_FCOE <= queues_left) {
+                       pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
+               } else {
+                       pf->num_fcoe_qps = 0;
+                       pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
+                       dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
+               }
+
+               queues_left -= pf->num_fcoe_qps;
+       }
+
+#endif
+       if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+               if (queues_left > 1) {
+                       queues_left -= 1; /* save 1 queue for FD */
+               } else {
+                       pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                       dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
+               }
+       }
+
+       if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+           pf->num_vf_qps && pf->num_req_vfs && queues_left) {
+               pf->num_req_vfs = min_t(int, pf->num_req_vfs,
+                                       (queues_left / pf->num_vf_qps));
+               queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
+       }
+
+       if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+           pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
+               pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
+                                         (queues_left / pf->num_vmdq_qps));
+               queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
+       }
+
+       pf->queues_left = queues_left;
+       dev_dbg(&pf->pdev->dev,
+               "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
+               pf->hw.func_caps.num_tx_qp,
+               !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
+               pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
+               pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
+#ifdef I40E_FCOE
+       dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
+#endif
+}
+
+/**
+ * i40e_setup_pf_filter_control - Setup PF static filter control
+ * @pf: PF to be setup
+ *
+ * i40e_setup_pf_filter_control sets up a PF's initial filter control
+ * settings. If PE/FCoE are enabled then it will also set the per PF
+ * based filter sizes required for them. It also enables Flow Director,
+ * ethertype and macvlan type filter settings for the PF.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
+{
+       struct i40e_filter_control_settings *settings = &pf->filter_settings;
+
+       settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
+
+       /* Flow Director is enabled */
+       if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
+               settings->enable_fdir = true;
+
+       /* Ethtype and MACVLAN filters enabled for PF */
+       settings->enable_ethtype = true;
+       settings->enable_macvlan = true;
+
+       if (i40e_set_filter_control(&pf->hw, settings))
+               return -ENOENT;
+
+       return 0;
+}
+
+#define INFO_STRING_LEN 255
+static void i40e_print_features(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       char *buf, *string;
+
+       string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
+       if (!string) {
+               dev_err(&pf->pdev->dev, "Features string allocation failed\n");
+               return;
+       }
+
+       buf = string;
+
+       buf += sprintf(buf, "Features: PF-id[%d] ", hw->pf_id);
+#ifdef CONFIG_PCI_IOV
+       buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
+#endif
+       buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", pf->hw.func_caps.num_vsis,
+                      pf->vsi[pf->lan_vsi]->num_queue_pairs,
+                      pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
+
+       if (pf->flags & I40E_FLAG_RSS_ENABLED)
+               buf += sprintf(buf, "RSS ");
+       if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+               buf += sprintf(buf, "FD_ATR ");
+       if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+               buf += sprintf(buf, "FD_SB ");
+               buf += sprintf(buf, "NTUPLE ");
+       }
+       if (pf->flags & I40E_FLAG_DCB_CAPABLE)
+               buf += sprintf(buf, "DCB ");
+       buf += sprintf(buf, "VxLAN ");
+#ifdef HAVE_PTP_1588_CLOCK
+       if (pf->flags & I40E_FLAG_PTP)
+               buf += sprintf(buf, "PTP ");
+#endif
+#ifdef I40E_FCOE
+       if (pf->flags & I40E_FLAG_FCOE_ENABLED)
+               buf += sprintf(buf, "FCOE ");
+#endif
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               buf += sprintf(buf, "VEB ");
+       else
+               buf += sprintf(buf, "VEPA ");
+
+       BUG_ON(buf > (string + INFO_STRING_LEN));
+       dev_info(&pf->pdev->dev, "%s\n", string);
+       kfree(string);
+}
+
+/**
+ * i40e_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40e_pci_tbl
+ *
+ * i40e_probe initializes a PF identified by a pci_dev structure.
+ * The OS initialization, configuring of the PF private structure,
+ * and a hardware reset occur.
+ *
+ * Returns 0 on success, negative on failure
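+ *
+ * Rough order of operations below: PCI/DMA setup, PF reset, admin queue
+ * and HMC init, switch and VSI setup, optional VF prep, and finally the
+ * service task timer is started.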
+ **/
+#ifdef HAVE_CONFIG_HOTPLUG
+static int __devinit i40e_probe(struct pci_dev *pdev,
+                               const struct pci_device_id *ent)
+#else
+static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+#endif
+{
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       struct i40e_pf *pf;
+       struct i40e_hw *hw;
+       static u16 pfs_found;
+       u16 wol_nvm_bits;
+       u16 link_status;
+       int err = 0;
+       u32 len;
+       u32 i;
+       u8 set_fc_aq_fail;
+
+       err = pci_enable_device_mem(pdev);
+       if (err)
+               return err;
+
+       /* set up for high or low DMA; try 64-bit first, fall back to 32 */
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (err)
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(pci_dev_to_dev(pdev),
+                       "DMA configuration failed: 0x%x\n", err);
+               goto err_dma;
+       }
+
+       /* set up pci connections */
+       err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+                                          IORESOURCE_MEM), i40e_driver_name);
+       if (err) {
+               dev_info(&pdev->dev,
+                        "pci_request_selected_regions failed %d\n", err);
+               goto err_pci_reg;
+       }
+
+       pci_enable_pcie_error_reporting(pdev);
+       pci_set_master(pdev);
+
+       /* Now that we have a PCI connection, we need to do the
+        * low level device setup.  This is primarily setting up
+        * the Admin Queue structures and then querying for the
+        * device's current profile information.
+        */
+       pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+       if (!pf) {
+               err = -ENOMEM;
+               goto err_pf_alloc;
+       }
+       pf->next_vsi = 0;
+       pf->pdev = pdev;
+       pci_set_drvdata(pdev, pf);
+       set_bit(__I40E_DOWN, &pf->state);
+
+       hw = &pf->hw;
+       hw->back = pf;
+
+       pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
+                               I40E_MAX_CSR_SPACE);
+
+       hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
+       if (!hw->hw_addr) {
+               err = -EIO;
+               dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
+                        (unsigned int)pci_resource_start(pdev, 0),
+                        pf->ioremap_len, err);
+               goto err_ioremap;
+       }
+       hw->vendor_id = pdev->vendor;
+       hw->device_id = pdev->device;
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+       hw->subsystem_vendor_id = pdev->subsystem_vendor;
+       hw->subsystem_device_id = pdev->subsystem_device;
+       hw->bus.device = PCI_SLOT(pdev->devfn);
+       hw->bus.func = PCI_FUNC(pdev->devfn);
+       pf->instance = pfs_found;
+
+       if (debug != -1)
+               pf->msg_enable = pf->hw.debug_mask = debug;
+
+       /* Reset here to make sure all is clean and to define PF 'n'.
+        * We have to do the PF reset first to "gracefully abort" all queues.
+        */
+       i40e_clear_hw(hw);
+       err = i40e_pf_reset(hw);
+       if (err) {
+               dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+               goto err_pf_reset;
+       }
+       pf->pfr_count++;
+
+       hw->aq.num_arq_entries = I40E_AQ_LEN;
+       hw->aq.num_asq_entries = I40E_AQ_LEN;
+       hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+       hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+       pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
+
+       snprintf(pf->int_name, sizeof(pf->int_name) - 1,
+                "%s-%s:misc",
+                dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
+
+       err = i40e_init_shared_code(hw);
+       if (err) {
+               dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", err);
+               goto err_pf_reset;
+       }
+
+       /* set up a default setting for link flow control */
+       pf->hw.fc.requested_mode = I40E_FC_NONE;
+
+       err = i40e_init_adminq(hw);
+
+       /* provide nvm, fw, api versions */
+       dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
+                hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
+                hw->aq.api_maj_ver, hw->aq.api_min_ver,
+                i40e_nvm_version_str(hw));
+
+       if (err) {
+               dev_info(&pdev->dev,
+                        "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+               goto err_pf_reset;
+       }
+
+       if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+           hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+               dev_info(&pdev->dev,
+                        "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+       else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
+                hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
+               dev_info(&pdev->dev,
+                        "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+
+       i40e_verify_eeprom(pf);
+
+       /* Rev 0 hardware was never productized */
+       if (hw->revision_id < 1)
+               dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
+
+       i40e_clear_pxe_mode(hw);
+       err = i40e_get_capabilities(pf);
+       if (err)
+               goto err_adminq_setup;
+
+       err = i40e_sw_init(pf);
+       if (err) {
+               dev_info(&pdev->dev, "sw_init failed: %d\n", err);
+               goto err_sw_init;
+       }
+
+       err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+                               hw->func_caps.num_rx_qp,
+                               pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+       if (err) {
+               dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
+               goto err_init_lan_hmc;
+       }
+
+       err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+       if (err) {
+               dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
+               err = -ENOENT;
+               goto err_configure_lan_hmc;
+       }
+
+       /* Disable LLDP for NICs that have firmware versions lower than v4.3.
+        * Ignore error return codes because if it was already disabled via
+        * hardware settings this will fail
+        */
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
+           (pf->hw.aq.fw_maj_ver < 4)) {
+               dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
+               i40e_aq_stop_lldp(hw, true, NULL);
+       }
+
+       i40e_get_mac_addr(hw, hw->mac.addr);
+       if (!is_valid_ether_addr(hw->mac.addr)) {
+               dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
+               err = -EIO;
+               goto err_mac_addr;
+       }
+       dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
+       ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
+       i40e_get_port_mac_addr(hw, hw->mac.port_addr);
+       if (is_valid_ether_addr(hw->mac.port_addr))
+               pf->flags |= I40E_FLAG_PORT_ID_VALID;
+#ifdef I40E_FCOE
+       err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
+       if (err)
+               dev_info(&pdev->dev,
+                        "(non-fatal) SAN MAC retrieval failed: %d\n", err);
+       if (!is_valid_ether_addr(hw->mac.san_addr)) {
+               dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
+                        hw->mac.san_addr);
+               ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
+       }
+       dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
+#endif /* I40E_FCOE */
+
+#ifdef HAVE_PCI_ERS
+       pci_save_state(pdev);
+#endif
+#ifdef CONFIG_DCB
+       err = i40e_init_pf_dcb(pf);
+       if (err) {
+               dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
+               pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+               /* Continue without DCB enabled */
+       }
+#endif /* CONFIG_DCB */
+
+       /* set up periodic task facility */
+       setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
+       pf->service_timer_period = HZ;
+
+       INIT_WORK(&pf->service_task, i40e_service_task);
+       clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+       pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
+
+       /* NVM bit on means WoL disabled for the port */
+       i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+       if (BIT_ULL(hw->port) & wol_nvm_bits || hw->partition_id != 1)
+               pf->wol_en = false;
+       else
+               pf->wol_en = true;
+       device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+
+       /* set up the main switch operations */
+       i40e_determine_queue_usage(pf);
+       err = i40e_init_interrupt_scheme(pf);
+       if (err)
+               goto err_switch_setup;
+
+       /* The number of VSIs reported by the FW is the minimum guaranteed
+        * to us; HW supports far more and we share the remaining pool with
+        * the other PFs. We allocate space for more than the guarantee with
+        * the understanding that we might not get them all later.
+        */
+       if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+               pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+       else
+               pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
+       /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
+       len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
+       pf->vsi = kzalloc(len, GFP_KERNEL);
+       if (!pf->vsi) {
+               err = -ENOMEM;
+               goto err_switch_setup;
+       }
+
+#ifdef CONFIG_PCI_IOV
+       /* prep for VF support */
+       if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+           (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+           !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+               if (pci_num_vf(pdev))
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+               else if (pf->num_req_vfs)
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+#endif
+       }
+#endif
+       err = i40e_setup_pf_switch(pf, false);
+       if (err) {
+               dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
+               goto err_vsis;
+       }
+
+       /* Make sure flow control is set according to current settings */
+       err = i40e_set_fc(hw, &set_fc_aq_fail, true);
+       if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
+               dev_dbg(&pf->pdev->dev,
+                        "Set fc with err %s aq_err %s on get_phy_cap\n",
+                        i40e_stat_str(hw, err),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+       if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
+               dev_dbg(&pf->pdev->dev,
+                        "Set fc with err %s aq_err %s on set_phy_config\n",
+                        i40e_stat_str(hw, err),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+       if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
+               dev_dbg(&pf->pdev->dev,
+                        "Set fc with err %s aq_err %s on get_link_info\n",
+                        i40e_stat_str(hw, err),
+                        i40e_aq_str(hw, hw->aq.asq_last_status));
+
+       /* if FDIR VSI was set up, start it now */
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+                       i40e_vsi_open(pf->vsi[i]);
+                       break;
+               }
+       }
+
+       /* driver is only interested in link up/down and module qualification
+        * reports from firmware
+        */
+       err = i40e_aq_set_phy_int_mask(&pf->hw,
+                                      I40E_AQ_EVENT_LINK_UPDOWN |
+                                      I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+       if (err)
+               dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4)) {
+               msleep(75);
+               err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+               if (err)
+                       dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, err),
+                                i40e_aq_str(&pf->hw,
+                                             pf->hw.aq.asq_last_status));
+       }
+       /* The main driver is (mostly) up and happy. We need to set this state
+        * before setting up the misc vector or we get a race and the vector
+        * ends up disabled forever.
+        */
+       clear_bit(__I40E_DOWN, &pf->state);
+
+       /* In case of MSIX we are going to setup the misc vector right here
+        * to handle admin queue events etc. In case of legacy and MSI
+        * the misc functionality and queue processing is combined in
+        * the same vector and that gets setup at open.
+        */
+       if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+               err = i40e_setup_misc_vector(pf);
+               if (err) {
+                       dev_info(&pdev->dev,
+                                "setup of misc vector failed: %d\n", err);
+                       goto err_vsis;
+               }
+       }
+
+#ifdef CONFIG_PCI_IOV
+       /* prep for VF support */
+       if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+           (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+           !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+               u32 val;
+
+               /* disable link interrupts for VFs */
+               val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
+               val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+               wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+               i40e_flush(hw);
+
+               if (pci_num_vf(pdev)) {
+                       dev_info(&pdev->dev,
+                                "Active VFs found, allocating resources.\n");
+                       err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
+                       if (err)
+                               dev_info(&pdev->dev,
+                                        "Error %d allocating resources for existing VFs\n",
+                                        err);
+#if !defined(HAVE_SRIOV_CONFIGURE) && !defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+               } else if (pf->num_req_vfs) {
+                       err = i40e_alloc_vfs(pf, pf->num_req_vfs);
+                       if (err) {
+                               pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+                               dev_info(&pdev->dev,
+                                        "failed to alloc vfs: %d\n", err);
+                       }
+#endif /* HAVE_SRIOV_CONFIGURE */
+               }
+       }
+#endif /* CONFIG_PCI_IOV */
+
+       pfs_found++;
+       i40e_dbg_pf_init(pf);
+
+       /* tell the firmware that we're starting */
+       i40e_send_version(pf);
+
+       /* since everything's happy, start the service_task timer */
+       mod_timer(&pf->service_timer,
+                 round_jiffies(jiffies + pf->service_timer_period));
+
+#ifdef I40E_FCOE
+       /* create FCoE interface */
+       i40e_fcoe_vsi_setup(pf);
+
+#endif
+#define PCI_SPEED_SIZE 8
+#define PCI_WIDTH_SIZE 8
+       /* Devices on the IOSF bus do not have this information right
+        * and will report PCI Gen 1 x 1 by default so don't bother
+        * checking them.
+        */
+       if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
+               char speed[PCI_SPEED_SIZE] = "Unknown";
+               char width[PCI_WIDTH_SIZE] = "Unknown";
+
+               /* Get the negotiated link width and speed from PCI config
+                * space
+                */
+               pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
+                                         &link_status);
+
+               i40e_set_pci_config_data(hw, link_status);
+
+               switch (hw->bus.speed) {
+               case i40e_bus_speed_8000:
+                       strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+               case i40e_bus_speed_5000:
+                       strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+               case i40e_bus_speed_2500:
+                       strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+               default:
+                       break;
+               }
+               switch (hw->bus.width) {
+               case i40e_bus_width_pcie_x8:
+                       strncpy(width, "8", PCI_WIDTH_SIZE); break;
+               case i40e_bus_width_pcie_x4:
+                       strncpy(width, "4", PCI_WIDTH_SIZE); break;
+               case i40e_bus_width_pcie_x2:
+                       strncpy(width, "2", PCI_WIDTH_SIZE); break;
+               case i40e_bus_width_pcie_x1:
+                       strncpy(width, "1", PCI_WIDTH_SIZE); break;
+               default:
+                       break;
+               }
+
+               dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
+                        speed, width);
+
+               if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+                   hw->bus.speed < i40e_bus_speed_8000) {
+                       dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+                       dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+               }
+       }
+
+       /* get the requested speeds from the fw */
+       err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
+       if (err)
+               dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
+                       i40e_stat_str(&pf->hw, err),
+                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+       pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+
+       /* get the supported phy types from the fw */
+       err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+       if (err)
+               dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
+                       i40e_stat_str(&pf->hw, err),
+                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+       pf->hw.phy.phy_types = LE32_TO_CPU(abilities.phy_type);
+
+       /* Add a filter to drop all Flow control frames from any VSI from being
+        * transmitted. By doing so we stop a malicious VF from sending out
+        * PAUSE or PFC frames and potentially controlling traffic for other
+        * PF/VF VSIs.
+        * The FW can still send Flow control frames if enabled.
+        */
+       i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+                                                       pf->main_vsi_seid);
+
+       /* print a string summarizing features */
+       i40e_print_features(pf);
+
+       return 0;
+
+       /* Unwind what we've done if something failed in the setup */
+err_vsis:
+       set_bit(__I40E_DOWN, &pf->state);
+       i40e_clear_interrupt_scheme(pf);
+       kfree(pf->vsi);
+err_switch_setup:
+       i40e_reset_interrupt_capability(pf);
+       del_timer_sync(&pf->service_timer);
+err_mac_addr:
+err_configure_lan_hmc:
+       (void)i40e_shutdown_lan_hmc(hw);
+err_init_lan_hmc:
+       kfree(pf->qp_pile);
+err_sw_init:
+err_adminq_setup:
+       (void)i40e_shutdown_adminq(hw);
+err_pf_reset:
+       dev_warn(&pdev->dev, "previous errors forcing module to load in debug mode\n");
+       i40e_dbg_pf_init(pf);
+       set_bit(__I40E_DEBUG_MODE, &pf->state);
+       return 0;
+err_ioremap:
+       kfree(pf);
+err_pf_alloc:
+       pci_disable_pcie_error_reporting(pdev);
+       pci_release_selected_regions(pdev,
+                                    pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+       pci_disable_device(pdev);
+       return err;
+}
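
The link-status word read in the probe path above packs the negotiated speed and width into standard PCIe fields. A minimal sketch of that decode, assuming the PCI_EXP_LNKSTA_* field definitions from <linux/pci_regs.h>; the driver does the equivalent inside i40e_set_pci_config_data():

#include <linux/pci_regs.h>

/* Hypothetical helper, not part of the driver: decode PCI_EXP_LNKSTA. */
static void decode_lnksta(u16 link_status, u16 *speed_code, u16 *lanes)
{
	/* Current Link Speed: 1 = 2.5GT/s, 2 = 5.0GT/s, 3 = 8.0GT/s */
	*speed_code = link_status & PCI_EXP_LNKSTA_CLS;
	/* Negotiated Link Width: number of lanes (x1/x2/x4/x8/...) */
	*lanes = (link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
}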
+
+/**
+ * i40e_remove - Device removal routine
+ * @pdev: PCI device information struct
+ *
+ * i40e_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+#ifdef HAVE_CONFIG_HOTPLUG
+static void __devexit i40e_remove(struct pci_dev *pdev)
+#else
+static void i40e_remove(struct pci_dev *pdev)
+#endif
+{
+       struct i40e_pf *pf = pci_get_drvdata(pdev);
+#ifdef HAVE_PTP_1588_CLOCK
+       struct i40e_hw *hw = &pf->hw;
+#endif /* HAVE_PTP_1588_CLOCK */
+       i40e_status ret_code;
+       int i;
+
+       i40e_dbg_pf_exit(pf);
+       if (test_bit(__I40E_DEBUG_MODE, &pf->state))
+               goto unmap;
+
+#ifdef HAVE_PTP_1588_CLOCK
+       i40e_ptp_stop(pf);
+
+       /* Disable RSS in hw */
+       wr32(hw, I40E_PFQF_HENA(0), 0);
+       wr32(hw, I40E_PFQF_HENA(1), 0);
+
+#endif /* HAVE_PTP_1588_CLOCK */
+       /* no more scheduling of any task */
+       set_bit(__I40E_DOWN, &pf->state);
+       del_timer_sync(&pf->service_timer);
+       cancel_work_sync(&pf->service_task);
+
+       if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+               i40e_free_vfs(pf);
+               pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+       }
+
+       i40e_fdir_teardown(pf);
+
+       /* If there is a switch structure or any orphans, remove them.
+        * This will leave only the PF's VSI remaining.
+        */
+       for (i = 0; i < I40E_MAX_VEB; i++) {
+               if (!pf->veb[i])
+                       continue;
+
+               if (pf->veb[i]->uplink_seid == pf->mac_seid ||
+                   pf->veb[i]->uplink_seid == 0)
+                       i40e_switch_branch_release(pf->veb[i]);
+       }
+
+       /* Now we can shutdown the PF's VSI, just before we kill
+        * adminq and hmc.
+        */
+       if (pf->vsi[pf->lan_vsi])
+               i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+
+       /* shutdown and destroy the HMC */
+       if (pf->hw.hmc.hmc_obj) {
+               ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+               if (ret_code)
+                       dev_warn(&pdev->dev,
+                                "Failed to destroy the HMC resources: %d\n",
+                                ret_code);
+       }
+
+       /* shutdown the adminq */
+       ret_code = i40e_shutdown_adminq(&pf->hw);
+       if (ret_code)
+               dev_warn(&pdev->dev,
+                        "Failed to destroy the Admin Queue resources: %d\n",
+                        ret_code);
+
+       /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
+       i40e_clear_interrupt_scheme(pf);
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               if (pf->vsi[i]) {
+                       i40e_vsi_clear_rings(pf->vsi[i]);
+                       i40e_vsi_clear(pf->vsi[i]);
+                       pf->vsi[i] = NULL;
+               }
+       }
+
+       for (i = 0; i < I40E_MAX_VEB; i++) {
+               kfree(pf->veb[i]);
+               pf->veb[i] = NULL;
+       }
+
+       kfree(pf->qp_pile);
+       kfree(pf->vsi);
+
+unmap:
+       iounmap(pf->hw.hw_addr);
+       kfree(pf);
+       pci_release_selected_regions(pdev,
+                                    pci_select_bars(pdev, IORESOURCE_MEM));
+
+       pci_disable_pcie_error_reporting(pdev);
+       pci_disable_device(pdev);
+}
+
+#ifdef HAVE_PCI_ERS
+/**
+ * i40e_pci_error_detected - warning that something funky happened in PCI land
+ * @pdev: PCI device information struct
+ *
+ * Called to warn that something happened and the error handling steps
+ * are in progress.  Allows the driver to quiesce things and be ready for
+ * remediation.
+ **/
+static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
+                                               enum pci_channel_state error)
+{
+       struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+       dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
+
+       /* shutdown all operations */
+       if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+               rtnl_lock();
+               i40e_prep_for_reset(pf);
+               rtnl_unlock();
+       }
+
+       /* Request a slot reset */
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * i40e_pci_error_slot_reset - a PCI slot reset just happened
+ * @pdev: PCI device information struct
+ *
+ * Called to find if the driver can work with the device now that
+ * the pci slot has been reset.  If a basic connection seems good
+ * (registers are readable and have sane content) then return a
+ * happy little PCI_ERS_RESULT_xxx.
+ **/
+static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
+{
+       struct i40e_pf *pf = pci_get_drvdata(pdev);
+       pci_ers_result_t result;
+       int err;
+       u32 reg;
+
+       dev_dbg(&pdev->dev, "%s\n", __func__);
+       if (pci_enable_device_mem(pdev)) {
+               dev_info(&pdev->dev,
+                        "Cannot re-enable PCI device after reset.\n");
+               result = PCI_ERS_RESULT_DISCONNECT;
+       } else {
+               pci_set_master(pdev);
+               pci_restore_state(pdev);
+               pci_save_state(pdev);
+               pci_wake_from_d3(pdev, false);
+
+               reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+               if (reg == 0)
+                       result = PCI_ERS_RESULT_RECOVERED;
+               else
+                       result = PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       err = pci_cleanup_aer_uncorrect_error_status(pdev);
+       if (err) {
+               dev_info(&pdev->dev,
+                        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+                        err);
+               /* non-fatal, continue */
+       }
+
+       return result;
+}
+
+/**
+ * i40e_pci_error_resume - restart operations after PCI error recovery
+ * @pdev: PCI device information struct
+ *
+ * Called to allow the driver to bring things back up after PCI error
+ * and/or reset recovery has finished.
+ **/
+static void i40e_pci_error_resume(struct pci_dev *pdev)
+{
+       struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+       dev_dbg(&pdev->dev, "%s\n", __func__);
+       if (test_bit(__I40E_SUSPENDED, &pf->state))
+               return;
+
+       rtnl_lock();
+       i40e_handle_reset_warning(pf);
+       rtnl_unlock();
+}
+
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+       struct i40e_pf *pf = pci_get_drvdata(pdev);
+       struct i40e_hw *hw = &pf->hw;
+
+       set_bit(__I40E_SUSPENDED, &pf->state);
+       set_bit(__I40E_DOWN, &pf->state);
+
+       if (!test_bit(__I40E_DEBUG_MODE, &pf->state)) {
+               del_timer_sync(&pf->service_timer);
+               cancel_work_sync(&pf->service_task);
+               i40e_fdir_teardown(pf);
+
+               rtnl_lock();
+               i40e_prep_for_reset(pf);
+               rtnl_unlock();
+
+               wr32(hw, I40E_PFPM_APM,
+                    (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+               wr32(hw, I40E_PFPM_WUFC,
+                    (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+               i40e_clear_interrupt_scheme(pf);
+       }
+
+       if (system_state == SYSTEM_POWER_OFF) {
+               pci_wake_from_d3(pdev, pf->wol_en);
+               pci_set_power_state(pdev, PCI_D3hot);
+       }
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40e_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct i40e_pf *pf = pci_get_drvdata(pdev);
+       struct i40e_hw *hw = &pf->hw;
+
+       set_bit(__I40E_SUSPENDED, &pf->state);
+       set_bit(__I40E_DOWN, &pf->state);
+
+       if (!test_bit(__I40E_DEBUG_MODE, &pf->state)) {
+               rtnl_lock();
+               i40e_prep_for_reset(pf);
+               rtnl_unlock();
+
+               wr32(hw, I40E_PFPM_APM,
+                    (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+               wr32(hw, I40E_PFPM_WUFC,
+                    (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+       }
+
+       pci_wake_from_d3(pdev, pf->wol_en);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       return 0;
+}
+
+/**
+ * i40e_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_resume(struct pci_dev *pdev)
+{
+       struct i40e_pf *pf = pci_get_drvdata(pdev);
+       int err;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       /* pci_restore_state() clears dev->state_saved, so
+        * call pci_save_state() again to restore it.
+        */
+       pci_save_state(pdev);
+
+       err = pci_enable_device_mem(pdev);
+       if (err) {
+               dev_err(pci_dev_to_dev(pdev), "Cannot enable PCI device from suspend\n");
+               return err;
+       }
+       pci_set_master(pdev);
+
+       /* no wakeup events while running */
+       pci_wake_from_d3(pdev, false);
+
+       /* handling the reset will rebuild the device state */
+       if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
+               clear_bit(__I40E_DOWN, &pf->state);
+               rtnl_lock();
+               i40e_reset_and_rebuild(pf, false);
+               rtnl_unlock();
+       }
+
+       return 0;
+}
+
+#endif
+#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+static const struct pci_error_handlers i40e_err_handler = {
+#else
+static struct pci_error_handlers i40e_err_handler = {
+#endif
+       .error_detected = i40e_pci_error_detected,
+       .slot_reset = i40e_pci_error_slot_reset,
+       .resume = i40e_pci_error_resume,
+};
+
+#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
+static struct pci_driver_rh i40e_driver_rh = {
+       .sriov_configure = i40e_pci_sriov_configure,
+};
+
+#endif
+#endif /* HAVE_PCI_ERS */
+static struct pci_driver i40e_driver = {
+       .name     = i40e_driver_name,
+       .id_table = i40e_pci_tbl,
+       .probe    = i40e_probe,
+#ifdef HAVE_CONFIG_HOTPLUG
+       .remove   = __devexit_p(i40e_remove),
+#else
+       .remove   = i40e_remove,
+#endif
+#ifdef CONFIG_PM
+       .suspend  = i40e_suspend,
+       .resume   = i40e_resume,
+#endif
+       .shutdown = i40e_shutdown,
+#ifdef HAVE_PCI_ERS
+       .err_handler = &i40e_err_handler,
+#endif
+#ifdef HAVE_SRIOV_CONFIGURE
+       .sriov_configure = i40e_pci_sriov_configure,
+#endif
+#ifdef HAVE_RHEL6_SRIOV_CONFIGURE
+       .rh_reserved = &i40e_driver_rh,
+#endif
+};
+
+/**
+ * i40e_init_module - Driver registration routine
+ *
+ * i40e_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40e_init_module(void)
+{
+       pr_info("%s: %s - version %s\n", i40e_driver_name,
+               i40e_driver_string, i40e_driver_version_str);
+       pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+
+       /* we will see if single thread per module is enough for now,
+        * it can't be any worse than using the system workqueue which
+        * was already single threaded
+        */
+       i40e_wq = create_singlethread_workqueue(i40e_driver_name);
+       if (!i40e_wq) {
+               pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
+               return -ENOMEM;
+       }
+
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+       i40e_configfs_init();
+#endif /* CONFIG_CONFIGFS_FS */
+       i40e_dbg_init();
+       return pci_register_driver(&i40e_driver);
+}
+module_init(i40e_init_module);
+
+/**
+ * i40e_exit_module - Driver exit cleanup routine
+ *
+ * i40e_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40e_exit_module(void)
+{
+       pci_unregister_driver(&i40e_driver);
+       destroy_workqueue(i40e_wq);
+       i40e_dbg_exit();
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+       i40e_configfs_exit();
+#endif /* CONFIG_CONFIGFS_FS */
+#ifdef HAVE_KFREE_RCU_BARRIER
+       rcu_barrier();
+#endif
+}
+module_exit(i40e_exit_module);
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_nvm.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_nvm.c
new file mode 100644 (file)
index 0000000..30ceba0
--- /dev/null
@@ -0,0 +1,1430 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_prototype.h"
+
+i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+                                              u16 *data);
+i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+                                           u16 *data);
+i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+                                                u16 *words, u16 *data);
+i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+                                             u16 *words, u16 *data);
+i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+                                      u32 offset, u16 words, void *data,
+                                      bool last_command);
+
+/**
+ * i40e_init_nvm - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside i40e_init_shared_code().
+ * Note that the term NVM is used here (and in all functions in this file)
+ * as an equivalent of the FLASH part mapped into the SR; the FLASH is
+ * always accessed through the Shadow RAM.
+ **/
+i40e_status i40e_init_nvm(struct i40e_hw *hw)
+{
+       struct i40e_nvm_info *nvm = &hw->nvm;
+       i40e_status ret_code = I40E_SUCCESS;
+       u32 fla, gens;
+       u8 sr_size;
+
+       /* The SR size is stored regardless of the nvm programming mode
+        * as the blank mode may be used in the factory line.
+        */
+       gens = rd32(hw, I40E_GLNVM_GENS);
+       sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+                          I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+       /* Convert to words (the field stores the SR size as a power of two, in KB) */
+       nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
+
+       /* Check if we are in the normal or blank NVM programming mode */
+       fla = rd32(hw, I40E_GLNVM_FLA);
+       if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+               /* Max NVM timeout */
+               nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+               nvm->blank_nvm_mode = false;
+       } else { /* Blank programming mode */
+               nvm->blank_nvm_mode = true;
+               ret_code = I40E_ERR_NVM_BLANK_MODE;
+               i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
+       }
+
+       return ret_code;
+}
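
The GENS SR_SIZE field stores the Shadow RAM size as a power of two in KB, so the word count works out to 2^field * 512. A worked sketch of the conversion above, assuming I40E_SR_WORDS_IN_1KB is 512 (1024 bytes / 2 bytes per word):

/* Hypothetical helper mirroring the conversion in i40e_init_nvm():
 * e.g. an SR_SIZE field of 5 means 2^5 = 32KB of Shadow RAM,
 * i.e. (1 << 5) * 512 = 16384 sixteen-bit words.
 */
static u32 sr_size_field_to_words(u8 sr_size_field)
{
	return (1u << sr_size_field) * 512;
}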
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * This function will request NVM ownership, for reading or writing,
+ * via the proper Admin Command.
+ **/
+i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+                                      enum i40e_aq_resource_access_type access)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       u64 gtime, timeout;
+       u64 time_left = 0;
+
+       if (hw->nvm.blank_nvm_mode)
+               goto i40e_acquire_nvm_exit;
+
+       ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+                                           0, &time_left, NULL);
+       /* Reading the Global Device Timer */
+       gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+       /* Store the timeout */
+       hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
+
+       if (ret_code)
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
+                          access, time_left, ret_code, hw->aq.asq_last_status);
+
+       if (ret_code && time_left) {
+               /* Poll until the current NVM owner times out */
+               timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
+               while ((gtime < timeout) && time_left) {
+                       usleep_range(10000, 20000);
+                       gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+                       ret_code = i40e_aq_request_resource(hw,
+                                                       I40E_NVM_RESOURCE_ID,
+                                                       access, 0, &time_left,
+                                                       NULL);
+                       if (ret_code == I40E_SUCCESS) {
+                               hw->nvm.hw_semaphore_timeout =
+                                           I40E_MS_TO_GTIME(time_left) + gtime;
+                               break;
+                       }
+               }
+               if (ret_code != I40E_SUCCESS) {
+                       hw->nvm.hw_semaphore_timeout = 0;
+                       i40e_debug(hw, I40E_DEBUG_NVM,
+                                  "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
+                                  time_left, ret_code, hw->aq.asq_last_status);
+               }
+       }
+
+i40e_acquire_nvm_exit:
+       return ret_code;
+}
+
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       u32 total_delay = 0;
+
+       if (hw->nvm.blank_nvm_mode)
+               return;
+
+       ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
+       /* there are some rare cases when trying to release the resource
+        * results in an admin Q timeout, so handle them correctly
+        */
+       while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
+              (total_delay < hw->aq.asq_cmd_timeout)) {
+               usleep_range(1000, 2000);
+               ret_code = i40e_aq_release_resource(hw,
+                                                   I40E_NVM_RESOURCE_ID,
+                                                   0, NULL);
+               total_delay++;
+       }
+}
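
The two functions above bracket every software access to the Shadow RAM. A minimal sketch of the intended calling pattern (error handling trimmed; this mirrors the READ_SA path of i40e_nvmupd_state_init() further down):

/* Sketch only: take NVM ownership, do the read, always release. */
static i40e_status read_one_word_locked(struct i40e_hw *hw, u16 offset,
					u16 *word)
{
	i40e_status status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);

	if (status)
		return status;
	status = i40e_read_nvm_word(hw, offset, word);
	i40e_release_nvm(hw);
	return status;
}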
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_ERR_TIMEOUT;
+       u32 srctl, wait_cnt;
+
+       /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
+       for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+               srctl = rd32(hw, I40E_GLNVM_SRCTL);
+               if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+                       ret_code = I40E_SUCCESS;
+                       break;
+               }
+               udelay(5);
+       }
+       if (ret_code == I40E_ERR_TIMEOUT)
+               i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
+       return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+                                        u16 *data)
+{
+       return i40e_read_nvm_word_srctl(hw, offset, data);
+}
+
+/**
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+                                              u16 *data)
+{
+       i40e_status ret_code = I40E_ERR_TIMEOUT;
+       u32 sr_reg;
+
+       if (offset >= hw->nvm.sr_size) {
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
+                          offset, hw->nvm.sr_size);
+               ret_code = I40E_ERR_PARAM;
+               goto read_nvm_exit;
+       }
+
+       /* Poll the done bit first */
+       ret_code = i40e_poll_sr_srctl_done_bit(hw);
+       if (ret_code == I40E_SUCCESS) {
+               /* Write the address and start reading */
+               sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+                        BIT(I40E_GLNVM_SRCTL_START_SHIFT);
+               wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+               /* Poll I40E_GLNVM_SRCTL until the done bit is set */
+               ret_code = i40e_poll_sr_srctl_done_bit(hw);
+               if (ret_code == I40E_SUCCESS) {
+                       sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+                       *data = (u16)((sr_reg &
+                                      I40E_GLNVM_SRDATA_RDDATA_MASK)
+                                   >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+               }
+       }
+       if (ret_code != I40E_SUCCESS)
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+                          offset);
+
+read_nvm_exit:
+       return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ.
+ **/
+i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+                                           u16 *data)
+{
+       i40e_status ret_code = I40E_ERR_TIMEOUT;
+
+       ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+       *data = LE16_TO_CPU(*(__le16 *)data);
+
+       return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR via i40e_read_nvm_buffer_srctl().
+ * The buffer read should be preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+                                          u16 *words, u16 *data)
+{
+       return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR via repeated
+ * i40e_read_nvm_word_srctl() calls. The buffer read should be preceded by
+ * the NVM ownership take and followed by the release.
+ **/
+i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+                                                u16 *words, u16 *data)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       u16 index, word;
+
+       /* Loop through the selected region */
+       for (word = 0; word < *words; word++) {
+               index = offset + word;
+               ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
+               if (ret_code != I40E_SUCCESS)
+                       break;
+       }
+
+       /* Update the number of words read from the Shadow RAM */
+       *words = word;
+
+       return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+                                             u16 *words, u16 *data)
+{
+       i40e_status ret_code;
+       u16 read_size = *words;
+       bool last_cmd = false;
+       u16 words_read = 0;
+       u16 i = 0;
+
+       do {
+               /* Calculate the number of words to read in this step:
+                * the FVL AQ does not allow reading more than one page
+                * at a time or crossing page boundaries.
+                */
+               if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+                       read_size = min(*words,
+                                       (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+                                     (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+               else
+                       read_size = min((*words - words_read),
+                                       I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+               /* Check if this is last command, if so set proper flag */
+               if ((words_read + read_size) >= *words)
+                       last_cmd = true;
+
+               ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+                                           data + words_read, last_cmd);
+               if (ret_code != I40E_SUCCESS)
+                       goto read_nvm_buffer_aq_exit;
+
+               /* Increment counter for words already read and move offset to
+                * new read location
+                */
+               words_read += read_size;
+               offset += read_size;
+       } while (words_read < *words);
+
+       for (i = 0; i < *words; i++)
+               data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+       *words = words_read;
+       return ret_code;
+}
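
The chunking loop above first reads only up to the next sector boundary, then continues in whole sectors. A sketch of the chunk-size rule, assuming I40E_SR_SECTOR_SIZE_IN_WORDS is 2048 (4KB sectors):

/* Hypothetical helper equivalent to the read_size computation above:
 * e.g. offset = 2040, 100 words requested -> chunks of 8, then 92 words
 * (the 92-word read also carries last_cmd = true).
 */
static u16 nvm_chunk_words(u32 offset, u16 remaining)
{
	u16 to_boundary = 2048 - (offset % 2048);

	return remaining < to_boundary ? remaining : to_boundary;
}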
+
+/**
+ * i40e_read_nvm_aq - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer for the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+                                      u32 offset, u16 words, void *data,
+                                      bool last_command)
+{
+       i40e_status ret_code = I40E_ERR_NVM;
+       struct i40e_asq_cmd_details cmd_details;
+
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+       /* Here we are checking the SR limit only for the flat memory model.
+        * We cannot do it for the module-based model, as we did not acquire
+        * the NVM resource yet (we cannot get the module pointer value).
+        * Firmware will check the module-based model.
+        */
+       if ((offset + words) > hw->nvm.sr_size)
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+                          (offset + words), hw->nvm.sr_size);
+       else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+               /* We can read only up to 4KB (one sector) in one AQ read */
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVM read fail error: tried to read %d words, limit is %d.\n",
+                          words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+       else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+                != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+               /* A single read cannot spread over two sectors */
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+                          offset, words);
+       else
+               ret_code = i40e_aq_read_nvm(hw, module_pointer,
+                                           2 * offset,  /*bytes*/
+                                           2 * words,   /*bytes*/
+                                           data, last_command, &cmd_details);
+
+       return ret_code;
+}
+
+/**
+ * i40e_write_nvm_aq - Writes Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ **/
+i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+                                       u32 offset, u16 words, void *data,
+                                       bool last_command)
+{
+       i40e_status ret_code = I40E_ERR_NVM;
+       struct i40e_asq_cmd_details cmd_details;
+
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+       /* Here we are checking the SR limit only for the flat memory model.
+        * We cannot do it for the module-based model, as we did not acquire
+        * the NVM resource yet (we cannot get the module pointer value).
+        * Firmware will check the module-based model.
+        */
+       if ((offset + words) > hw->nvm.sr_size)
+               hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n");
+       else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+               /* We can write only up to 4KB (one sector), in one AQ write */
+               hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n");
+       else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+                != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+               /* A single write cannot spread over two sectors */
+               hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n");
+       else
+               ret_code = i40e_aq_update_nvm(hw, module_pointer,
+                                             2 * offset,  /*bytes*/
+                                             2 * words,   /*bytes*/
+                                             data, last_command, &cmd_details);
+
+       return ret_code;
+}
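
The third guard above rejects any write that straddles a 4KB sector. A worked sketch of that test, again assuming 2048-word sectors:

/* Hypothetical helper: 16 words at offset 2040 end at word 2055, and
 * 2055/2048 != 2040/2048, so the write is rejected; the same 16 words
 * at offset 2032 end at word 2047 and stay within one sector.
 */
static bool write_crosses_sector(u32 offset, u16 words)
{
	return ((offset + (words - 1)) / 2048) != (offset / 2048);
}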
+
+/**
+ * i40e_write_nvm_word - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
+ * NVM ownership has to be acquired and released (on ARQ completion event
+ * reception) by the caller. To commit the SR to NVM, the update checksum
+ * function should be called.
+ **/
+i40e_status i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+                                         void *data)
+{
+       *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
+
+       /* Value 0x00 below means that we treat SR as a flat mem */
+       return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
+}
+
+/**
+ * i40e_write_nvm_buffer - Writes Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * on ARQ completion event reception by the caller. To commit the SR to NVM,
+ * the update checksum function should be called.
+ **/
+i40e_status i40e_write_nvm_buffer(struct i40e_hw *hw,
+                                           u8 module_pointer, u32 offset,
+                                           u16 words, void *data)
+{
+       __le16 *le_word_ptr = (__le16 *)data;
+       u16 *word_ptr = (u16 *)data;
+       u32 i = 0;
+
+       for (i = 0; i < words; i++)
+               le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
+
+       /* Here we will only write one buffer as the size of the modules
+        * mirrored in the Shadow RAM is always less than 4K.
+        */
+       return i40e_write_nvm_aq(hw, module_pointer, offset, words,
+                                data, false);
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
+ *
+ * This function calculates SW Checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
+ * is customer specific and unknown. Therefore, this function skips the
+ * maximum possible size of the VPD area (1kB).
+ **/
+i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       struct i40e_virt_mem vmem;
+       u16 pcie_alt_module = 0;
+       u16 checksum_local = 0;
+       u16 vpd_module = 0;
+       u16 *data;
+       u16 i = 0;
+
+       ret_code = i40e_allocate_virt_mem(hw, &vmem,
+                                   I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+       if (ret_code)
+               goto i40e_calc_nvm_checksum_exit;
+       data = (u16 *)vmem.va;
+
+       /* read pointer to VPD area */
+       ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
+       if (ret_code != I40E_SUCCESS) {
+               ret_code = I40E_ERR_NVM_CHECKSUM;
+               goto i40e_calc_nvm_checksum_exit;
+       }
+
+       /* read pointer to PCIe Alt Auto-load module */
+       ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+                                     &pcie_alt_module);
+       if (ret_code != I40E_SUCCESS) {
+               ret_code = I40E_ERR_NVM_CHECKSUM;
+               goto i40e_calc_nvm_checksum_exit;
+       }
+
+       /* Calculate SW checksum that covers the whole 64kB shadow RAM
+        * except the VPD and PCIe ALT Auto-load modules
+        */
+       for (i = 0; i < hw->nvm.sr_size; i++) {
+               /* Read SR page */
+               if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+                       u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+                       ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
+                       if (ret_code != I40E_SUCCESS) {
+                               ret_code = I40E_ERR_NVM_CHECKSUM;
+                               goto i40e_calc_nvm_checksum_exit;
+                       }
+               }
+
+               /* Skip Checksum word */
+               if (i == I40E_SR_SW_CHECKSUM_WORD)
+                       continue;
+               /* Skip VPD module (convert byte size to word count) */
+               if ((i >= (u32)vpd_module) &&
+                   (i < ((u32)vpd_module +
+                    (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+                       continue;
+               }
+               /* Skip PCIe ALT module (convert byte size to word count) */
+               if ((i >= (u32)pcie_alt_module) &&
+                   (i < ((u32)pcie_alt_module +
+                    (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+                       continue;
+               }
+
+               checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
+       }
+
+       *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+       i40e_free_virt_mem(hw, &vmem);
+       return ret_code;
+}
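
The BASE - sum computation above is defined so that the covered words plus the stored checksum word sum back to the base value modulo 2^16 (I40E_SR_SW_CHECKSUM_BASE is 0xBABA in the driver headers), which is the invariant i40e_validate_nvm_checksum() relies on below:

/* Sketch of the checksum invariant, not driver code:
 *   stored = BASE - sum(covered words)            (mod 2^16)
 *   =>  (u16)(sum(covered words) + stored) == BASE
 */
static bool nvm_checksum_ok(u16 sum_of_covered_words, u16 stored_checksum)
{
	return (u16)(sum_of_covered_words + stored_checksum) ==
	       I40E_SR_SW_CHECKSUM_BASE;
}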
+
+/**
+ * i40e_update_nvm_checksum - Updates the NVM checksum
+ * @hw: pointer to hardware structure
+ *
+ * NVM ownership must be acquired before calling this function and released
+ * on ARQ completion event reception by the caller.
+ * This function will commit SR to NVM.
+ **/
+i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       u16 checksum;
+       __le16 le_sum;
+
+       ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+       le_sum = CPU_TO_LE16(checksum);
+       if (ret_code == I40E_SUCCESS)
+               ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
+                                            1, &le_sum, true);
+
+       return ret_code;
+}
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need the checksum, the pointer can be NULL.
+ **/
+i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+                                                u16 *checksum)
+{
+       i40e_status ret_code = I40E_SUCCESS;
+       u16 checksum_sr = 0;
+       u16 checksum_local = 0;
+
+       ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+       if (ret_code != I40E_SUCCESS)
+               goto i40e_validate_nvm_checksum_exit;
+
+       /* Do not use i40e_read_nvm_word() because we do not want to take
+        * the synchronization semaphores twice here.
+        */
+       i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+       /* Verify read checksum from EEPROM is the same as
+        * calculated checksum
+        */
+       if (checksum_local != checksum_sr)
+               ret_code = I40E_ERR_NVM_CHECKSUM;
+
+       /* If the user cares, return the calculated checksum */
+       if (checksum)
+               *checksum = checksum_local;
+
+i40e_validate_nvm_checksum_exit:
+       return ret_code;
+}
+
+static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   u8 *bytes, int *perrno);
+static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   int *perrno);
+static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+                                                  struct i40e_nvm_access *cmd,
+                                                  int *perrno);
+static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+                                                  struct i40e_nvm_access *cmd,
+                                                  u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+                                                 struct i40e_nvm_access *cmd,
+                                                 u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+                                                struct i40e_nvm_access *cmd,
+                                                u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   u8 *bytes, int *perrno);
+static INLINE u8 i40e_nvmupd_get_module(u32 val)
+{
+       return (u8)(val & I40E_NVM_MOD_PNT_MASK);
+}
+static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
+{
+       return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
+}
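
The getters above unpack a config word that carries both a transaction type and a module pointer. A sketch of the matching pack operation, using the driver's own mask and shift:

/* Hypothetical inverse of the two getters above. */
static u32 nvmupd_pack_config(u8 transaction, u8 module)
{
	return ((u32)transaction << I40E_NVM_TRANS_SHIFT) |
	       (module & I40E_NVM_MOD_PNT_MASK);
}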
+
+static const char *i40e_nvm_update_state_str[] = {
+       "I40E_NVMUPD_INVALID",
+       "I40E_NVMUPD_READ_CON",
+       "I40E_NVMUPD_READ_SNT",
+       "I40E_NVMUPD_READ_LCB",
+       "I40E_NVMUPD_READ_SA",
+       "I40E_NVMUPD_WRITE_ERA",
+       "I40E_NVMUPD_WRITE_CON",
+       "I40E_NVMUPD_WRITE_SNT",
+       "I40E_NVMUPD_WRITE_LCB",
+       "I40E_NVMUPD_WRITE_SA",
+       "I40E_NVMUPD_CSUM_CON",
+       "I40E_NVMUPD_CSUM_SA",
+       "I40E_NVMUPD_CSUM_LCB",
+       "I40E_NVMUPD_STATUS",
+       "I40E_NVMUPD_EXEC_AQ",
+       "I40E_NVMUPD_GET_AQ_RESULT",
+};
+
+/**
+ * i40e_nvmupd_command - Process an NVM update command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * Dispatches command depending on what update state is current
+ **/
+i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+                                         struct i40e_nvm_access *cmd,
+                                         u8 *bytes, int *perrno)
+{
+       i40e_status status;
+       enum i40e_nvmupd_cmd upd_cmd;
+
+       /* assume success */
+       *perrno = 0;
+
+       /* early check for status command and debug msgs */
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+       i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
+                  i40e_nvm_update_state_str[upd_cmd],
+                  hw->nvmupd_state,
+                  hw->aq.nvm_release_on_done);
+
+       if (upd_cmd == I40E_NVMUPD_INVALID) {
+               *perrno = -EFAULT;
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_validate_command returns %d errno %d\n",
+                          upd_cmd, *perrno);
+       }
+
+       /* a status request returns immediately rather than
+        * going into the state machine
+        */
+       if (upd_cmd == I40E_NVMUPD_STATUS) {
+               bytes[0] = hw->nvmupd_state;
+               return I40E_SUCCESS;
+       }
+
+       switch (hw->nvmupd_state) {
+       case I40E_NVMUPD_STATE_INIT:
+               status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
+               break;
+
+       case I40E_NVMUPD_STATE_READING:
+               status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
+               break;
+
+       case I40E_NVMUPD_STATE_WRITING:
+               status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+               break;
+
+       case I40E_NVMUPD_STATE_INIT_WAIT:
+       case I40E_NVMUPD_STATE_WRITE_WAIT:
+               status = I40E_ERR_NOT_READY;
+               *perrno = -EBUSY;
+               break;
+
+       default:
+               /* invalid state, should never happen */
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVMUPD: no such state %d\n", hw->nvmupd_state);
+               status = I40E_NOT_SUPPORTED;
+               *perrno = -ESRCH;
+               break;
+       }
+       return status;
+}
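
For a read too large for one AQ transaction, the dispatcher above is driven through the state machine by a sequence of commands. An illustrative sequence (not driver code), using the transaction values recognized by i40e_nvmupd_validate_command():

/* SNT starts the transaction, CON continues it, LCB closes it. */
static const u32 multi_read_configs[] = {
	I40E_NVM_SNT << I40E_NVM_TRANS_SHIFT, /* acquire NVM, INIT -> READING */
	I40E_NVM_CON << I40E_NVM_TRANS_SHIFT, /* stay in READING */
	I40E_NVM_LCB << I40E_NVM_TRANS_SHIFT, /* release NVM, back to INIT */
};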
+
+/**
+ * i40e_nvmupd_state_init - Handle NVM update state Init
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * Process legitimate commands of the Init state and conditionally set next
+ * state. Reject all other commands.
+ **/
+static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   u8 *bytes, int *perrno)
+{
+       i40e_status status = I40E_SUCCESS;
+       enum i40e_nvmupd_cmd upd_cmd;
+
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+       switch (upd_cmd) {
+       case I40E_NVMUPD_READ_SA:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+               if (status) {
+                       *perrno = i40e_aq_rc_to_posix(status,
+                                                    hw->aq.asq_last_status);
+               } else {
+                       status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+                       i40e_release_nvm(hw);
+               }
+               break;
+
+       case I40E_NVMUPD_READ_SNT:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+               if (status) {
+                       *perrno = i40e_aq_rc_to_posix(status,
+                                                    hw->aq.asq_last_status);
+               } else {
+                       status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+                       if (status)
+                               i40e_release_nvm(hw);
+                       else
+                               hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
+               }
+               break;
+
+       case I40E_NVMUPD_WRITE_ERA:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+               if (status) {
+                       *perrno = i40e_aq_rc_to_posix(status,
+                                                    hw->aq.asq_last_status);
+               } else {
+                       status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
+                       if (status) {
+                               i40e_release_nvm(hw);
+                       } else {
+                               hw->aq.nvm_release_on_done = true;
+                               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+                       }
+               }
+               break;
+
+       case I40E_NVMUPD_WRITE_SA:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+               if (status) {
+                       *perrno = i40e_aq_rc_to_posix(status,
+                                                    hw->aq.asq_last_status);
+               } else {
+                       status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+                       if (status) {
+                               i40e_release_nvm(hw);
+                       } else {
+                               hw->aq.nvm_release_on_done = true;
+                               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+                       }
+               }
+               break;
+
+       case I40E_NVMUPD_WRITE_SNT:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+               if (status) {
+                       *perrno = i40e_aq_rc_to_posix(status,
+                                                    hw->aq.asq_last_status);
+               } else {
+                       status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+                       if (status)
+                               i40e_release_nvm(hw);
+                       else
+                               hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+               }
+               break;
+
+       case I40E_NVMUPD_CSUM_SA:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+               if (status) {
+                       *perrno = i40e_aq_rc_to_posix(status,
+                                                    hw->aq.asq_last_status);
+               } else {
+                       status = i40e_update_nvm_checksum(hw);
+                       if (status) {
+                               *perrno = hw->aq.asq_last_status ?
+                                  i40e_aq_rc_to_posix(status,
+                                                      hw->aq.asq_last_status) :
+                                  -EIO;
+                               i40e_release_nvm(hw);
+                       } else {
+                               hw->aq.nvm_release_on_done = true;
+                               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+                       }
+               }
+               break;
+
+       case I40E_NVMUPD_EXEC_AQ:
+               status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
+               break;
+
+       case I40E_NVMUPD_GET_AQ_RESULT:
+               status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
+               break;
+
+       default:
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVMUPD: bad cmd %s in init state\n",
+                          i40e_nvm_update_state_str[upd_cmd]);
+               status = I40E_ERR_NVM;
+               *perrno = -ESRCH;
+               break;
+       }
+       return status;
+}
+
+/**
+ * i40e_nvmupd_state_reading - Handle NVM update state Reading
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * NVM ownership is already held.  Process legitimate commands and set any
+ * change in state; reject all other commands.
+ **/
+static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   u8 *bytes, int *perrno)
+{
+       i40e_status status = I40E_SUCCESS;
+       enum i40e_nvmupd_cmd upd_cmd;
+
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+       switch (upd_cmd) {
+       case I40E_NVMUPD_READ_SA:
+       case I40E_NVMUPD_READ_CON:
+               status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+               break;
+
+       case I40E_NVMUPD_READ_LCB:
+               status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
+               i40e_release_nvm(hw);
+               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+               break;
+
+       default:
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVMUPD: bad cmd %s in reading state.\n",
+                          i40e_nvm_update_state_str[upd_cmd]);
+               status = I40E_NOT_SUPPORTED;
+               *perrno = -ESRCH;
+               break;
+       }
+       return status;
+}
+
+/**
+ * i40e_nvmupd_state_writing - Handle NVM update state Writing
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * NVM ownership is already held.  Process legitimate commands and set any
+ * change in state; reject all other commands.
+ **/
+static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   u8 *bytes, int *perrno)
+{
+       i40e_status status = I40E_SUCCESS;
+       enum i40e_nvmupd_cmd upd_cmd;
+       bool retry_attempt = false;
+
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+retry:
+       switch (upd_cmd) {
+       case I40E_NVMUPD_WRITE_CON:
+               status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+               if (!status)
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+               break;
+
+       case I40E_NVMUPD_WRITE_LCB:
+               status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+               if (status) {
+                       *perrno = hw->aq.asq_last_status ?
+                                  i40e_aq_rc_to_posix(status,
+                                                      hw->aq.asq_last_status) :
+                                  -EIO;
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+               } else {
+                       hw->aq.nvm_release_on_done = true;
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+               }
+               break;
+
+       case I40E_NVMUPD_CSUM_CON:
+               status = i40e_update_nvm_checksum(hw);
+               if (status) {
+                       *perrno = hw->aq.asq_last_status ?
+                                  i40e_aq_rc_to_posix(status,
+                                                      hw->aq.asq_last_status) :
+                                  -EIO;
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+               } else {
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+               }
+               break;
+
+       case I40E_NVMUPD_CSUM_LCB:
+               status = i40e_update_nvm_checksum(hw);
+               if (status) {
+                       *perrno = hw->aq.asq_last_status ?
+                                  i40e_aq_rc_to_posix(status,
+                                                      hw->aq.asq_last_status) :
+                                  -EIO;
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+               } else {
+                       hw->aq.nvm_release_on_done = true;
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+               }
+               break;
+
+       default:
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVMUPD: bad cmd %s in writing state.\n",
+                          i40e_nvm_update_state_str[upd_cmd]);
+               status = I40E_NOT_SUPPORTED;
+               *perrno = -ESRCH;
+               break;
+       }
+
+       /* In some circumstances, a multi-write transaction takes longer
+        * than the default 3 minute timeout on the write semaphore.  If
+        * the write failed with an EBUSY status, that is the likely cause,
+        * so here we try to reacquire the semaphore and then retry the
+        * write.  We attempt only one retry before giving up.
+        */
+       if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+           !retry_attempt) {
+               i40e_status old_status = status;
+               u32 old_asq_status = hw->aq.asq_last_status;
+               u32 gtime;
+
+               gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+               if (gtime >= hw->nvm.hw_semaphore_timeout) {
+                       i40e_debug(hw, I40E_DEBUG_ALL,
+                                  "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+                                  gtime, hw->nvm.hw_semaphore_timeout);
+                       i40e_release_nvm(hw);
+                       status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+                       if (status) {
+                               i40e_debug(hw, I40E_DEBUG_ALL,
+                                          "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+                                          hw->aq.asq_last_status);
+                               status = old_status;
+                               hw->aq.asq_last_status = old_asq_status;
+                       } else {
+                               retry_attempt = true;
+                               goto retry;
+                       }
+               }
+       }
+
+       return status;
+}
+
+/**
+ * i40e_nvmupd_validate_command - Validate given command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
+ **/
+static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   int *perrno)
+{
+       enum i40e_nvmupd_cmd upd_cmd;
+       u8 module, transaction;
+
+       /* anything that doesn't match a recognized case is an error */
+       upd_cmd = I40E_NVMUPD_INVALID;
+
+       transaction = i40e_nvmupd_get_transaction(cmd->config);
+       module = i40e_nvmupd_get_module(cmd->config);
+
+       /* limits on data size */
+       if ((cmd->data_size < 1) ||
+           (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_validate_command data_size %d\n",
+                          cmd->data_size);
+               *perrno = -EFAULT;
+               return I40E_NVMUPD_INVALID;
+       }
+
+       switch (cmd->command) {
+       case I40E_NVM_READ:
+               switch (transaction) {
+               case I40E_NVM_CON:
+                       upd_cmd = I40E_NVMUPD_READ_CON;
+                       break;
+               case I40E_NVM_SNT:
+                       upd_cmd = I40E_NVMUPD_READ_SNT;
+                       break;
+               case I40E_NVM_LCB:
+                       upd_cmd = I40E_NVMUPD_READ_LCB;
+                       break;
+               case I40E_NVM_SA:
+                       upd_cmd = I40E_NVMUPD_READ_SA;
+                       break;
+               case I40E_NVM_EXEC:
+                       if (module == 0xf)
+                               upd_cmd = I40E_NVMUPD_STATUS;
+                       else if (module == 0)
+                               upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+                       break;
+               }
+               break;
+
+       case I40E_NVM_WRITE:
+               switch (transaction) {
+               case I40E_NVM_CON:
+                       upd_cmd = I40E_NVMUPD_WRITE_CON;
+                       break;
+               case I40E_NVM_SNT:
+                       upd_cmd = I40E_NVMUPD_WRITE_SNT;
+                       break;
+               case I40E_NVM_LCB:
+                       upd_cmd = I40E_NVMUPD_WRITE_LCB;
+                       break;
+               case I40E_NVM_SA:
+                       upd_cmd = I40E_NVMUPD_WRITE_SA;
+                       break;
+               case I40E_NVM_ERA:
+                       upd_cmd = I40E_NVMUPD_WRITE_ERA;
+                       break;
+               case I40E_NVM_CSUM:
+                       upd_cmd = I40E_NVMUPD_CSUM_CON;
+                       break;
+               case (I40E_NVM_CSUM|I40E_NVM_SA):
+                       upd_cmd = I40E_NVMUPD_CSUM_SA;
+                       break;
+               case (I40E_NVM_CSUM|I40E_NVM_LCB):
+                       upd_cmd = I40E_NVMUPD_CSUM_LCB;
+                       break;
+               case I40E_NVM_EXEC:
+                       if (module == 0)
+                               upd_cmd = I40E_NVMUPD_EXEC_AQ;
+                       break;
+               }
+               break;
+       }
+
+       return upd_cmd;
+}
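
The transaction nibble in cmd->config drives this mapping; reading the switch
above together with the transaction names suggests SNT marks the start of a
multi-part transfer, CON a continuation, LCB the last command in a block, and
SA (apparently SNT|LCB) a complete single-shot action. As a minimal sketch,
assuming the transaction field sits above the module-pointer byte in config
(per an I40E_NVM_TRANS_SHIFT layout taken here as an assumption from
i40e_type.h), a caller could request a single-shot read like this; the struct
fields are the ones used by the validation code above:

    /* Illustrative only: compose a config word for a single-action read */
    struct i40e_nvm_access cmd = {
            .command   = I40E_NVM_READ,
            .config    = (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) | 0x0, /* module 0 */
            .offset    = 0,
            .data_size = 1024,  /* must be 1..I40E_NVMUPD_MAX_DATA */
    };
    /* i40e_nvmupd_validate_command() would map this to I40E_NVMUPD_READ_SA */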
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+                                                struct i40e_nvm_access *cmd,
+                                                u8 *bytes, int *perrno)
+{
+       struct i40e_asq_cmd_details cmd_details;
+       i40e_status status;
+       struct i40e_aq_desc *aq_desc;
+       u32 buff_size = 0;
+       u8 *buff = NULL;
+       u32 aq_desc_len;
+       u32 aq_data_len;
+
+       i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+       aq_desc_len = sizeof(struct i40e_aq_desc);
+       memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+       /* get the aq descriptor */
+       if (cmd->data_size < aq_desc_len) {
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+                          cmd->data_size, aq_desc_len);
+               *perrno = -EINVAL;
+               return I40E_ERR_PARAM;
+       }
+       aq_desc = (struct i40e_aq_desc *)bytes;
+
+       /* if data buffer needed, make sure it's ready */
+       aq_data_len = cmd->data_size - aq_desc_len;
+       buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
+       if (buff_size) {
+               if (!hw->nvm_buff.va) {
+                       status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+                                                       hw->aq.asq_buf_size);
+                       if (status)
+                               i40e_debug(hw, I40E_DEBUG_NVM,
+                                          "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+                                          status);
+               }
+
+               if (hw->nvm_buff.va) {
+                       buff = hw->nvm_buff.va;
+                       memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+               }
+       }
+
+       /* and away we go! */
+       status = i40e_asq_send_command(hw, aq_desc, buff,
+                                      buff_size, &cmd_details);
+       if (status) {
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_exec_aq err %s aq_err %s\n",
+                          i40e_stat_str(hw, status),
+                          i40e_aq_str(hw, hw->aq.asq_last_status));
+               *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+       }
+
+       return status;
+}
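
The cmd->data_size byte stream does double duty here: the first
sizeof(struct i40e_aq_desc) bytes are taken as the AQ descriptor itself, and
anything beyond that becomes the indirect payload, sized by the larger of the
trailing byte count and the descriptor's own datalen field. A minimal sketch
of that layout, assuming the descriptor's opcode/datalen fields from
i40e_adminq_cmd.h and using a placeholder opcode rather than a real AQ
command:

    /* Illustrative only: descriptor first, payload immediately after */
    u8 payload[128] = { 0 };
    u8 buf[sizeof(struct i40e_aq_desc) + sizeof(payload)];
    struct i40e_aq_desc *desc = (struct i40e_aq_desc *)buf;

    memset(buf, 0, sizeof(buf));
    desc->opcode  = CPU_TO_LE16(0x0000);          /* placeholder opcode */
    desc->datalen = CPU_TO_LE16(sizeof(payload)); /* indirect buffer length */
    memcpy(buf + sizeof(*desc), payload, sizeof(payload));
    /* then: cmd.data_size = sizeof(buf); bytes = buf; */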
+
+/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+                                                   struct i40e_nvm_access *cmd,
+                                                   u8 *bytes, int *perrno)
+{
+       u32 aq_total_len;
+       u32 aq_desc_len;
+       int remainder;
+       u8 *buff;
+
+       i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+       aq_desc_len = sizeof(struct i40e_aq_desc);
+       aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
+
+       /* check offset range */
+       if (cmd->offset > aq_total_len) {
+               i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+                          __func__, cmd->offset, aq_total_len);
+               *perrno = -EINVAL;
+               return I40E_ERR_PARAM;
+       }
+
+       /* check copy length range */
+       if (cmd->data_size > (aq_total_len - cmd->offset)) {
+               int new_len = aq_total_len - cmd->offset;
+
+               i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+                          __func__, cmd->data_size, new_len);
+               cmd->data_size = new_len;
+       }
+
+       remainder = cmd->data_size;
+       if (cmd->offset < aq_desc_len) {
+               u32 len = aq_desc_len - cmd->offset;
+
+               len = min(len, cmd->data_size);
+               i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+                          __func__, cmd->offset, cmd->offset + len);
+
+               buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+               memcpy(bytes, buff, len);
+
+               bytes += len;
+               remainder -= len;
+               buff = hw->nvm_buff.va;
+       } else {
+               buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+       }
+
+       if (remainder > 0) {
+               int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+               i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+                          __func__, start_byte, start_byte + remainder);
+               memcpy(bytes, buff, remainder);
+       }
+
+       return I40E_SUCCESS;
+}
+
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+                                                 struct i40e_nvm_access *cmd,
+                                                 u8 *bytes, int *perrno)
+{
+       struct i40e_asq_cmd_details cmd_details;
+       i40e_status status;
+       u8 module, transaction;
+       bool last;
+
+       transaction = i40e_nvmupd_get_transaction(cmd->config);
+       module = i40e_nvmupd_get_module(cmd->config);
+       last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
+
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+       status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+                                 bytes, last, &cmd_details);
+       if (status) {
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
+                          module, cmd->offset, cmd->data_size);
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_nvm_read status %d aq %d\n",
+                          status, hw->aq.asq_last_status);
+               *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+                                                  struct i40e_nvm_access *cmd,
+                                                  int *perrno)
+{
+       i40e_status status = I40E_SUCCESS;
+       struct i40e_asq_cmd_details cmd_details;
+       u8 module, transaction;
+       bool last;
+
+       transaction = i40e_nvmupd_get_transaction(cmd->config);
+       module = i40e_nvmupd_get_module(cmd->config);
+       last = (transaction & I40E_NVM_LCB);
+
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+       status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+                                  last, &cmd_details);
+       if (status) {
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
+                          module, cmd->offset, cmd->data_size);
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_nvm_erase status %d aq %d\n",
+                          status, hw->aq.asq_last_status);
+               *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+       }
+
+       return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+                                                  struct i40e_nvm_access *cmd,
+                                                  u8 *bytes, int *perrno)
+{
+       i40e_status status = I40E_SUCCESS;
+       struct i40e_asq_cmd_details cmd_details;
+       u8 module, transaction;
+       bool last;
+
+       transaction = i40e_nvmupd_get_transaction(cmd->config);
+       module = i40e_nvmupd_get_module(cmd->config);
+       last = (transaction & I40E_NVM_LCB);
+
+       memset(&cmd_details, 0, sizeof(cmd_details));
+       cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+       status = i40e_aq_update_nvm(hw, module, cmd->offset,
+                                   (u16)cmd->data_size, bytes, last,
+                                   &cmd_details);
+       if (status) {
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+                          module, cmd->offset, cmd->data_size);
+               i40e_debug(hw, I40E_DEBUG_NVM,
+                          "i40e_nvmupd_nvm_write status %d aq %d\n",
+                          status, hw->aq.asq_last_status);
+               *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+       }
+
+       return status;
+}
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_osdep.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_osdep.h
new file mode 100644 (file)
index 0000000..0a515d2
--- /dev/null
@@ -0,0 +1,133 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+#include <linux/highuid.h>
+
+#include <linux/io.h>
+#include <asm-generic/int-ll64.h>
+
+#ifndef readq
+static inline __u64 readq(const volatile void __iomem *addr)
+{
+       const volatile u32 __iomem *p = addr;
+       u32 low, high;
+
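+       /* Two separate 32-bit reads are not atomic on their own; a torn
+        * value is avoided only when the register pair latches on the low
+        * read, as the PRTTSYN timer does (see i40e_ptp.c).
+        */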
+       low = readl(p);
+       high = readl(p + 1);
+
+       return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+       writel(val, addr);
+       writel(val >> 32, addr + 4);
+}
+#endif
+#include "kcompat.h"
+
+/* This file is the glue ("magic") between the shared code and
+ * the actual OS primitives.
+ */
+
+#undef ASSERT
+
+#define hw_dbg(hw, S, A...)    do {} while (0)
+
+#define wr32(a, reg, value)    writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg)           readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value)    writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg)           readq((a)->hw_addr + (reg))
+#define i40e_flush(a)          readl((a)->hw_addr + I40E_GLGEN_STAT)
+/* memory allocation tracking */
+struct i40e_dma_mem {
+       void *va;
+       dma_addr_t pa;
+       u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+                       i40e_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+       void *va;
+       u32 size;
+} __packed;
+
+#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
+
+#define i40e_debug(h, m, s, ...)                                \
+do {                                                            \
+       if (((m) & (h)->debug_mask))                            \
+               pr_info("i40e %02x.%x " s,                      \
+                       (h)->bus.device, (h)->bus.func,         \
+                       ##__VA_ARGS__);                         \
+} while (0)
+
+/* these things are all directly replaced with sed during the kernel build */
+#define INLINE inline
+
+#define CPU_TO_LE16(o) cpu_to_le16(o)
+#define CPU_TO_LE32(s) cpu_to_le32(s)
+#define CPU_TO_LE64(h) cpu_to_le64(h)
+#define LE16_TO_CPU(a) le16_to_cpu(a)
+#define LE32_TO_CPU(c) le32_to_cpu(c)
+#define LE64_TO_CPU(k) le64_to_cpu(k)
+
+/* SW spinlock */
+struct i40e_spinlock {
+       struct mutex spinlock;
+};
+
+#define i40e_init_spinlock(_sp) i40e_init_spinlock_d(_sp)
+#define i40e_acquire_spinlock(_sp) i40e_acquire_spinlock_d(_sp)
+#define i40e_release_spinlock(_sp) i40e_release_spinlock_d(_sp)
+#define i40e_destroy_spinlock(_sp) i40e_destroy_spinlock_d(_sp)
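
Despite the name, the shared code's "spinlock" is a sleeping mutex on Linux,
as the struct above shows. The *_d backends are defined in the driver proper
rather than in this header; a plausible minimal sketch of what they reduce to
(assumed, not copied from the source):

    /* Illustrative only: mutex-backed implementations of the _d hooks */
    static inline void i40e_init_spinlock_d(struct i40e_spinlock *sp)
    {
            mutex_init(&sp->spinlock);
    }

    static inline void i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
    {
            mutex_lock(&sp->spinlock);
    }

    static inline void i40e_release_spinlock_d(struct i40e_spinlock *sp)
    {
            mutex_unlock(&sp->spinlock);
    }

One consequence worth noting: because a mutex can sleep, these "spinlocks"
must not be taken from atomic context.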
+
+#define I40E_HTONL(a)          htonl(a)
+
+#define i40e_memset(a, b, c, d)  memset((a), (b), (c))
+#define i40e_memcpy(a, b, c, d)  memcpy((a), (b), (c))
+
+typedef enum i40e_status_code i40e_status;
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#define I40E_FCOE
+#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif /* _I40E_OSDEP_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_prototype.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_prototype.h
new file mode 100644 (file)
index 0000000..d1fff9b
--- /dev/null
@@ -0,0 +1,468 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures.  These exist
+ * mostly because they are needed even before init has
+ * happened, and they assist in the early SW and FW setup.
+ */
+
+/* adminq functions */
+i40e_status i40e_init_adminq(struct i40e_hw *hw);
+i40e_status i40e_shutdown_adminq(struct i40e_hw *hw);
+i40e_status i40e_init_asq(struct i40e_hw *hw);
+i40e_status i40e_init_arq(struct i40e_hw *hw);
+i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw);
+i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw);
+i40e_status i40e_shutdown_asq(struct i40e_hw *hw);
+i40e_status i40e_shutdown_arq(struct i40e_hw *hw);
+u16 i40e_clean_asq(struct i40e_hw *hw);
+void i40e_free_adminq_asq(struct i40e_hw *hw);
+void i40e_free_adminq_arq(struct i40e_hw *hw);
+i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+                                            struct i40e_arq_event_info *e,
+                                            u16 *events_pending);
+i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+                               struct i40e_aq_desc *desc,
+                               void *buff, /* can be NULL */
+                               u16  buff_size,
+                               struct i40e_asq_cmd_details *cmd_details);
+bool i40e_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+                  void *desc, void *buffer, u16 buf_len);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+
+/* admin send queue commands */
+
+i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+                               u16 *fw_major_version, u16 *fw_minor_version,
+                               u32 *fw_build,
+                               u16 *api_major_version, u16 *api_minor_version,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
+                               u32 reg_addr, u64 reg_val,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+                               u32  reg_addr, u64 *reg_val,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+                       bool qualified_modules, bool report_init,
+                       struct i40e_aq_get_phy_abilities_resp *abilities,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_phy_config(struct i40e_hw *hw,
+                               struct i40e_aq_set_phy_config *config,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+                                 bool atomic_reset);
+i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_mac_config(struct i40e_hw *hw,
+                               u16 max_frame_size, bool crc_en, u16 pacing,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+                               u64 *advt_reg,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_partner_advt(struct i40e_hw *hw,
+                               u64 *advt_reg,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+               bool enable_link, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+                               bool enable_lse, struct i40e_link_status *link,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+                               u64 advt_reg,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+                               struct i40e_driver_version *dv,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+                               struct i40e_vsi_context *vsi_ctx,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+                               u16 vsi_id, bool set_filter,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+               u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+               u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+                               u16 seid, bool enable, u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+                               u16 seid, bool enable, u16 vid,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+                               struct i40e_vsi_context *vsi_ctx,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+                               struct i40e_vsi_context *vsi_ctx,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+                               u16 downlink_seid, u8 enabled_tc,
+                               bool default_port, bool enable_l2_filtering,
+                               u16 *pveb_seid,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+                               u16 veb_seid, u16 *switch_id, bool *floating,
+                               u16 *statistic_index, u16 *vebs_used,
+                               u16 *vebs_free,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+                       struct i40e_aqc_add_macvlan_element_data *mv_list,
+                       u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+                       struct i40e_aqc_remove_macvlan_element_data *mv_list,
+                       u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+                       struct i40e_aqc_add_remove_vlan_element_data *v_list,
+                       u8 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+                       struct i40e_aqc_add_remove_vlan_element_data *v_list,
+                       u8 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+                               u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+                               struct i40e_aqc_get_switch_config_resp *buf,
+                               u16 buf_size, u16 *start_seid,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+                               enum i40e_aq_resources_ids resource,
+                               enum i40e_aq_resource_access_type access,
+                               u8 sdp_number, u64 *timeout,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+                               enum i40e_aq_resources_ids resource,
+                               u8 sdp_number,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+                               u32 offset, u16 length, void *data,
+                               bool last_command,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+                               u32 offset, u16 length, bool last_command,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_read_nvm_config(struct i40e_hw *hw,
+                               u8 cmd_flags, u32 field_id, void *data,
+                               u16 buf_size, u16 *element_count,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_write_nvm_config(struct i40e_hw *hw,
+                               u8 cmd_flags, void *data, u16 buf_size,
+                               u16 element_count,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_oem_post_update(struct i40e_hw *hw,
+                               void *buff, u16 buff_size,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+                               void *buff, u16 buff_size, u16 *data_size,
+                               enum i40e_admin_queue_opc list_type_opc,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+                               u32 offset, u16 length, void *data,
+                               bool last_command,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+                               u8 mib_type, void *buff, u16 buff_size,
+                               u16 *local_len, u16 *remote_len,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+                               u8 mib_type, void *buff, u16 buff_size,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+                               bool enable_update,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+                               void *buff, u16 buff_size, u16 tlv_len,
+                               u16 *mib_len,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+                               u8 bridge_type, void *buff, u16 buff_size,
+                               u16 old_len, u16 new_len, u16 offset,
+                               u16 *mib_len,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+                               u8 bridge_type, void *buff, u16 buff_size,
+                               u16 tlv_len, u16 *mib_len,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+                               void *buff, u16 buff_size,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+                               bool start_agent,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+                               u16 udp_port, u8 protocol_index,
+                               u8 *filter_index,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+                       u8 *num_entries,
+                       struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+                       u16 count,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+                                      u16 mac_seid, u16 vsi_seid,
+                                      u16 *ret_seid);
+i40e_status i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+                               u16 vsi_seid, u16 tag, u16 queue_num,
+                               u16 *tags_used, u16 *tags_free,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+                               u16 tag, u16 *tags_used, u16 *tags_free,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+                               u16 etag, u8 num_tags_in_buf, void *buf,
+                               u16 *tags_used, u16 *tags_free,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+                               u16 etag, u16 *tags_used, u16 *tags_free,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+                               u16 old_tag, u16 new_tag, u16 *tags_used,
+                               u16 *tags_free,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+                               u16 vlan_id, u16 *stat_index,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+                               u16 vlan_id, u16 stat_index,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_port_parameters(struct i40e_hw *hw,
+                               u16 bad_frame_vsi, bool save_bad_pac,
+                               bool pad_short_pac, bool double_vlan,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+                                   u16 flags, u8 *mac_addr,
+                                   struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+                               u16 seid, u16 credit, u8 max_credit,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
+                               u8 tcmap, bool request, u8 *tcmap_ret,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
+                               enum i40e_aq_hmc_profile *profile,
+                               u8 *pe_vf_enabled_count,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_ets_bw_limit(
+       struct i40e_hw *hw, u16 seid,
+       struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+                       u16 seid,
+                       struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+                               enum i40e_aq_hmc_profile profile,
+                               u8 pe_vf_enabled_count,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+                               u16 seid, u16 credit, u8 max_bw,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+                       struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+               u16 seid,
+               struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+               enum i40e_admin_queue_opc opcode,
+               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+       u16 seid,
+       struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+                       u16 seid,
+                       struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+                       u16 seid,
+                       struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+               u16 seid,
+               struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+               u16 seid,
+               struct i40e_aqc_query_port_ets_config_resp *bw_data,
+               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+               u16 seid,
+               struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+                                       struct i40e_lldp_variables *lldp_cfg);
+i40e_status i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+               u16 vsi,
+               struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+               u8 filter_count);
+
+i40e_status i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+               u16 vsi,
+               struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+               u8 filter_count);
+
+i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+                               u32 reg_addr0, u32 *reg_val0,
+                               u32 reg_addr1, u32 *reg_val1);
+i40e_status i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+                               u32 addr, u32 dw_count, void *buffer);
+i40e_status i40e_aq_alternate_write(struct i40e_hw *hw,
+                               u32 reg_addr0, u32 reg_val0,
+                               u32 reg_addr1, u32 reg_val1);
+i40e_status i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+                               u32 addr, u32 dw_count, void *buffer);
+i40e_status i40e_aq_alternate_clear(struct i40e_hw *hw);
+i40e_status i40e_aq_alternate_write_done(struct i40e_hw *hw,
+                               u8 bios_mode, bool *reset_needed);
+i40e_status i40e_aq_set_oem_mode(struct i40e_hw *hw,
+                               u8 oem_mode);
+
+/* i40e_common */
+i40e_status i40e_init_shared_code(struct i40e_hw *hw);
+i40e_status i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+i40e_status i40e_update_link_info(struct i40e_hw *hw);
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+               u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
+i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+                       struct i40e_aqc_configure_partition_bw_data *bw_data,
+                       struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+                                           u32 pba_num_size);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+#ifdef I40E_FCOE
+i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+#endif
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
+/* prototype for functions used for NVM access */
+i40e_status i40e_init_nvm(struct i40e_hw *hw);
+i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+                                     enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+                                        u16 *data);
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+                                          u16 *words, u16 *data);
+i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
+                                       u32 offset, u16 words, void *data,
+                                       bool last_command);
+i40e_status i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+                                         void *data);
+i40e_status i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
+                                           u32 offset, u16 words, void *data);
+i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
+i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
+i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+                                                u16 *checksum);
+i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+                                         struct i40e_nvm_access *cmd,
+                                         u8 *bytes, int *perrno);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+       return i40e_ptype_lookup[ptype];
+}
+
+/* prototype for functions used for SW spinlocks */
+void i40e_init_spinlock(struct i40e_spinlock *sp);
+void i40e_acquire_spinlock(struct i40e_spinlock *sp);
+void i40e_release_spinlock(struct i40e_spinlock *sp);
+void i40e_destroy_spinlock(struct i40e_spinlock *sp);
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+                            struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+                               enum i40e_virtchnl_ops v_opcode,
+                               i40e_status v_retval,
+                               u8 *msg, u16 msglen,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+                               struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+                               u8 *mac_addr, u16 ethtype, u16 flags,
+                               u16 vsi_seid, u16 queue, bool is_add,
+                               struct i40e_control_filter_stats *stats,
+                               struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+                               u8 table_id, u32 start_index, u16 buff_size,
+                               void *buff, u16 *ret_buff_size,
+                               u8 *ret_next_table, u32 *ret_next_index,
+                               struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+                                                   u16 vsi_seid);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_ptp.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_ptp.c
new file mode 100644 (file)
index 0000000..a6dce3e
--- /dev/null
@@ -0,0 +1,761 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+#ifdef HAVE_PTP_1588_CLOCK
+#include <linux/ptp_classify.h>
+
+/* The XL710 timesync is very much like Intel's 82599 design when it comes to
+ * the fundamental clock design. However, the clock operations are much simpler
+ * in the XL710 because the device supports a full 64 bits of nanoseconds.
+ * Because the field is so wide, we can forgo the cycle counter and just
+ * operate with the nanosecond field directly without fear of overflow.
+ *
+ * Much like the 82599, the update period is dependent upon the link speed:
+ * At 40Gb link or no link, the period is 1.6ns.
+ * At 10Gb link, the period is multiplied by 2. (3.2ns)
+ * At 1Gb link, the period is multiplied by 20. (32ns)
+ * 1588 functionality is not supported at 100Mbps.
+ */
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
+#define I40E_PTP_1GB_INCVAL  0x2000000000ULL
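
These constants make sense if PRTTSYN_INC holds the per-tick period as
nanoseconds in 32.32 fixed point (an assumption, but one consistent with the
1.6 ns figure above): 0x0199999999 / 2^32 is roughly 1.6, and the 10Gb and
1Gb values are exactly 2x and 20x that. A standalone check:

    /* Illustrative only: verify the INCVAL constants against the periods */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long incval[] = {
                    0x0199999999ULL,  /* 40Gb: ~1.6 ns */
                    0x0333333333ULL,  /* 10Gb: ~3.2 ns */
                    0x2000000000ULL,  /*  1Gb:  32 ns  */
            };
            int i;

            for (i = 0; i < 3; i++)
                    printf("%#llx -> %.4f ns\n", incval[i],
                           (double)incval[i] / 4294967296.0);
            return 0;
    }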
+
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (2 << \
+                                       I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+
+/**
+ * i40e_ptp_read - Read the PHC time from the device
+ * @pf: Board private structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * This function reads the PRTTSYN_TIME registers. Since the registers hold
+ * the time as 64 bits of nanoseconds, the raw value must be converted into
+ * a timespec64 before it can be returned.
+ **/
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 hi, lo;
+       u64 ns;
+
+       /* The timer latches on the lowest register read. */
+       lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+       hi = rd32(hw, I40E_PRTTSYN_TIME_H);
+
+       ns = (((u64)hi) << 32) | lo;
+
+       *ts = ns_to_timespec64(ns);
+}
+
+/**
+ * i40e_ptp_write - Write the PHC time to the device
+ * @pf: Board private structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * This function writes the PRTTSYN_TIME registers with the user value. Since
+ * we receive a timespec from the stack, we must convert that timespec into
+ * nanoseconds before programming the registers.
+ **/
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u64 ns = timespec64_to_ns(ts);
+
+       /* The timer will not update until the high register is written, so
+        * write the low register first.
+        */
+       wr32(hw, I40E_PRTTSYN_TIME_L, (u32)ns);
+       wr32(hw, I40E_PRTTSYN_TIME_H, (u32)(ns >> 32));
+}
+
+/**
+ * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time
+ * @hwtstamps: Timestamp structure to update
+ * @timestamp: Timestamp from the hardware
+ *
+ * We need to convert the NIC clock value into a hwtstamp which can be used by
+ * the upper level timestamping functions. Since the timestamp is simply a 64-
+ * bit nanosecond value, we can call ns_to_ktime directly to handle this.
+ **/
+static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
+                                        u64 timestamp)
+{
+       memset(hwtstamps, 0, sizeof(*hwtstamps));
+
+       hwtstamps->hwtstamp = ns_to_ktime(timestamp);
+}
+
+/**
+ * i40e_ptp_adjfreq - Adjust the PHC frequency
+ * @ptp: The PTP clock structure
+ * @ppb: Parts per billion adjustment from the base
+ *
+ * Adjust the frequency of the PHC by the indicated parts per billion from the
+ * base frequency.
+ **/
+static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+       struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+       struct i40e_hw *hw = &pf->hw;
+       u64 adj, freq, diff;
+       int neg_adj = 0;
+
+       if (ppb < 0) {
+               neg_adj = 1;
+               ppb = -ppb;
+       }
+
+       smp_mb(); /* Force any pending update before accessing. */
+       adj = ACCESS_ONCE(pf->ptp_base_adj);
+
+       freq = adj;
+       freq *= ppb;
+       diff = div_u64(freq, 1000000000ULL);
+
+       if (neg_adj)
+               adj -= diff;
+       else
+               adj += diff;
+
+       wr32(hw, I40E_PRTTSYN_INC_L, (u32)adj);
+       wr32(hw, I40E_PRTTSYN_INC_H, (u32)(adj >> 32));
+
+       return 0;
+}
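
Under the same 32.32 fixed-point reading of the increment register, the
scaling above is just base * ppb / 1e9. A worked example, assuming the 40Gb
base increment:

    /* Illustrative only: +100 ppb against the 40Gb base increment */
    u64 base = 0x0199999999ULL;                    /* ~1.6 ns per tick */
    u64 diff = div_u64(base * 100, 1000000000ULL); /* = 687 */
    u64 adj  = base + diff;                        /* clock runs ~100 ppb fast */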
+
+/**
+ * i40e_ptp_adjtime - Adjust the PHC time
+ * @ptp: The PTP clock structure
+ * @delta: Offset in nanoseconds to adjust the PHC time by
+ *
+ * Adjust the current PHC time by the offset specified in nanoseconds.
+ **/
+static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+       struct timespec64 now, then = ns_to_timespec64(delta);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pf->tmreg_lock, flags);
+
+       i40e_ptp_read(pf, &now);
+       now = timespec64_add(now, then);
+       i40e_ptp_write(pf, (const struct timespec64 *)&now);
+
+       spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+       return 0;
+}
+
+/**
+ * i40e_ptp_gettime64 - Get the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec64 structure to hold the current time value
+ *
+ * Read the device clock and return the correct value in ns, after converting
+ * it into a timespec64 struct.
+ **/
+static int i40e_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+       struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pf->tmreg_lock, flags);
+       i40e_ptp_read(pf, ts);
+       spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+       return 0;
+}
+
+/**
+ * i40e_ptp_settime64 - Set the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec64 structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ **/
+static int i40e_ptp_settime64(struct ptp_clock_info *ptp,
+                           const struct timespec64 *ts)
+{
+       struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pf->tmreg_lock, flags);
+       i40e_ptp_write(pf, ts);
+       spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+       return 0;
+}
+
+#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64
+/**
+ * i40e_ptp_gettime - Get the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the device clock and return the correct value in ns, after converting
+ * it into a timespec struct.
+ **/
+static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+       struct timespec64 ts64;
+       int err;
+
+       err = i40e_ptp_gettime64(ptp, &ts64);
+       if (err)
+               return err;
+
+       *ts = timespec64_to_timespec(ts64);
+       return 0;
+}
+
+/**
+ * i40e_ptp_settime - Set the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ **/
+static int i40e_ptp_settime(struct ptp_clock_info *ptp,
+                           const struct timespec *ts)
+{
+       struct timespec64 ts64 = timespec_to_timespec64(*ts);
+
+       return i40e_ptp_settime64(ptp, &ts64);
+}
+#endif
+
+/**
+ * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem
+ * @ptp: The PTP clock structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * The XL710 does not support any of the ancillary features of the PHC
+ * subsystem, so this function simply returns -EOPNOTSUPP.
+ **/
+static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
+                                  struct ptp_clock_request *rq, int on)
+{
+       return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
+ * @vsi: The VSI with the rings relevant to 1588
+ *
+ * This watchdog task is scheduled to detect the error case where hardware has
+ * dropped an Rx packet that was timestamped while the ring was full. This
+ * error is rare but leaves the device in a state where it is unable to
+ * timestamp any future packets.
+ **/
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_ring *rx_ring;
+       unsigned long rx_event;
+       u32 prttsyn_stat;
+       int n;
+
+       /* Since we cannot turn off the Rx timestamp logic if the device is
+        * configured for Tx timestamping, we check if Rx timestamping is
+        * configured. We don't want to spuriously warn about Rx timestamp
+        * hangs if we don't care about the timestamps.
+        */
+       if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
+               return;
+
+       prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+       /* Unless all four receive timestamp registers are latched, we are not
+        * concerned about a possible PTP Rx hang, so just update the timeout
+        * counter and exit.
+        */
+       if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
+                              I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
+                             (I40E_PRTTSYN_STAT_1_RXT1_MASK <<
+                              I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
+                             (I40E_PRTTSYN_STAT_1_RXT2_MASK <<
+                              I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
+                             (I40E_PRTTSYN_STAT_1_RXT3_MASK <<
+                              I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
+               pf->last_rx_ptp_check = jiffies;
+               return;
+       }
+
+       /* Determine the most recent watchdog or rx_timestamp event. */
+       rx_event = pf->last_rx_ptp_check;
+       for (n = 0; n < vsi->num_queue_pairs; n++) {
+               rx_ring = vsi->rx_rings[n];
+               if (time_after(rx_ring->last_rx_timestamp, rx_event))
+                       rx_event = rx_ring->last_rx_timestamp;
+       }
+
+       /* Only need to read the high RXSTMP register to clear the lock */
+       if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+               rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+               rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+               rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+               rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+               pf->last_rx_ptp_check = jiffies;
+               pf->rx_hwtstamp_cleared++;
+               dev_warn(&vsi->back->pdev->dev,
+                        "clearing PTP Rx timestamp hang\n");
+       }
+}
+
+/**
+ * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
+ * @pf: Board private structure
+ *
+ * Read the value of the Tx timestamp from the registers, convert it into a
+ * value consumable by the stack, and store that result into the shhwtstamps
+ * struct before passing it up the stack.
+ **/
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
+{
+       struct skb_shared_hwtstamps shhwtstamps;
+       struct i40e_hw *hw = &pf->hw;
+       u32 hi, lo;
+       u64 ns;
+
+       if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+               return;
+
+       /* don't attempt to timestamp if we don't have an skb */
+       if (!pf->ptp_tx_skb)
+               return;
+
+       lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
+       hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
+
+       ns = (((u64)hi) << 32) | lo;
+
+       i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
+       skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
+       dev_kfree_skb_any(pf->ptp_tx_skb);
+       pf->ptp_tx_skb = NULL;
+}
+
+/**
+ * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp
+ * @pf: Board private structure
+ * @skb: Particular skb to send timestamp with
+ * @index: Index into the receive timestamp registers for the timestamp
+ *
+ * The XL710 receives a notification in the receive descriptor with an offset
+ * into the set of RXTIME registers where the timestamp is for that skb. This
+ * function goes and fetches the receive timestamp from that offset, if a valid
+ * one exists. The RXTIME registers are in ns, so we must convert the result
+ * first.
+ **/
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
+{
+       u32 prttsyn_stat, hi, lo;
+       struct i40e_hw *hw;
+       u64 ns;
+
+       /* Since we cannot turn off the Rx timestamp logic if the device is
+        * doing Tx timestamping, check if Rx timestamping is configured.
+        */
+       if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
+               return;
+
+       hw = &pf->hw;
+
+       prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+       if (!(prttsyn_stat & BIT(index)))
+               return;
+
+       lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
+       hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
+
+       ns = (((u64)hi) << 32) | lo;
+
+       i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
+}
+
+/**
+ * i40e_ptp_set_increment - Utility function to update clock increment rate
+ * @pf: Board private structure
+ *
+ * During a link change, the DMA frequency that drives the 1588 logic will
+ * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds,
+ * we must update the increment value per clock tick.
+ **/
+void i40e_ptp_set_increment(struct i40e_pf *pf)
+{
+       struct i40e_link_status *hw_link_info;
+       struct i40e_hw *hw = &pf->hw;
+       u64 incval;
+
+       hw_link_info = &hw->phy.link_info;
+
+       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+
+       switch (hw_link_info->link_speed) {
+       case I40E_LINK_SPEED_10GB:
+               incval = I40E_PTP_10GB_INCVAL;
+               break;
+       case I40E_LINK_SPEED_1GB:
+               incval = I40E_PTP_1GB_INCVAL;
+               break;
+       case I40E_LINK_SPEED_100MB:
+               dev_warn(&pf->pdev->dev,
+                        "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n");
+               incval = 0;
+               break;
+       case I40E_LINK_SPEED_40GB:
+       default:
+               incval = I40E_PTP_40GB_INCVAL;
+               break;
+       }
+
+       /* Write the new increment value into the increment register. The
+        * hardware will not update the clock until both registers have been
+        * written.
+        */
+       wr32(hw, I40E_PRTTSYN_INC_L, (u32)incval);
+       wr32(hw, I40E_PRTTSYN_INC_H, (u32)(incval >> 32));
+
+       /* Update the base adjustment value. */
+       ACCESS_ONCE(pf->ptp_base_adj) = incval;
+       smp_mb(); /* Force the above update. */
+}
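
For reference, the per-speed increment constants selected above are Q32 fixed-point nanoseconds-per-tick values (the 100 Mb case writes zero, which stops SYSTIME). A sketch decoding the values as defined upstream; treat the constants as assumptions and verify them against the definitions earlier in this file:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const struct { const char *speed; uint64_t incval; } tbl[] = {
            { "40G", 0x0199999999ULL }, /* ~1.6 ns/tick */
            { "10G", 0x0333333333ULL }, /* ~3.2 ns/tick */
            { "1G",  0x2000000000ULL }, /* 32.0 ns/tick */
        };

        for (unsigned int i = 0; i < 3; i++) {
            double ns = tbl[i].incval / 4294967296.0; /* 2^32 */
            printf("%-4s inc=%.4f ns/tick (~%.2f MHz SYSTIME clock)\n",
                   tbl[i].speed, ns, 1000.0 / ns);
        }
        return 0;
    }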
+
+/**
+ * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Obtain the current hardware timestamping settings as requested. To do this,
+ * keep a shadow copy of the timestamp settings rather than attempting to
+ * deconstruct them from the registers.
+ **/
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+       struct hwtstamp_config *config = &pf->tstamp_config;
+
+       if (!(pf->flags & I40E_FLAG_PTP))
+               return -EOPNOTSUPP;
+
+       return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+               -EFAULT : 0;
+}
+
+/**
+ * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
+ * @pf: Board private structure
+ * @config: hwtstamp settings requested or saved
+ *
+ * Control hardware registers to enter the specific mode requested by the
+ * user. Also used during reset path to ensure that timestamp settings are
+ * maintained.
+ *
+ * Note: modifies config in place, and may update the requested mode to be
+ * more broad if the specific filter is not directly supported.
+ **/
+static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
+                                      struct hwtstamp_config *config)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 tsyntype, regval;
+
+       /* Reserved for future extensions. */
+       if (config->flags)
+               return -EINVAL;
+
+       switch (config->tx_type) {
+       case HWTSTAMP_TX_OFF:
+               pf->ptp_tx = false;
+               break;
+       case HWTSTAMP_TX_ON:
+               pf->ptp_tx = true;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (config->rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               pf->ptp_rx = false;
+               /* We set the type to V1, but do not enable UDP packet
+                * recognition. This is as close to disabling PTP Rx
+                * timestamps as we can get, since V1 packets are always
+                * UDP and L2 packets are a V2-only feature.
+                */
+               tsyntype = I40E_PRTTSYN_CTL1_TSYNTYPE_V1;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+               pf->ptp_rx = true;
+               tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
+                          I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
+                          I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+               config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               pf->ptp_rx = true;
+               tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
+                          I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
+                          I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+               config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               break;
+       case HWTSTAMP_FILTER_ALL:
+       default:
+               return -ERANGE;
+       }
+
+       /* Read all 1588-related status registers to clear and unlatch them. */
+       rd32(hw, I40E_PRTTSYN_STAT_0);
+       rd32(hw, I40E_PRTTSYN_TXTIME_H);
+       rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+       rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+       rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+       rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+
+       /* Enable/disable the Tx timestamp interrupt based on user input. */
+       regval = rd32(hw, I40E_PRTTSYN_CTL0);
+       if (pf->ptp_tx)
+               regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+       else
+               regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+       wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+       regval = rd32(hw, I40E_PFINT_ICR0_ENA);
+       if (pf->ptp_tx)
+               regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+       else
+               regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+       wr32(hw, I40E_PFINT_ICR0_ENA, regval);
+
+       /* Although there is no simple on/off switch for Rx, we "disable" Rx
+        * timestamps by selecting V1-only mode and clearing the UDP
+        * recognition bits. This ought to disable all PTP Rx timestamps, as
+        * V1 packets are always over UDP. Software is additionally configured
+        * to ignore Rx timestamps via the pf->ptp_rx flag.
+        */
+       regval = rd32(hw, I40E_PRTTSYN_CTL1);
+       /* clear everything but the enable bit */
+       regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+       /* now enable bits for desired Rx timestamps */
+       regval |= tsyntype;
+       wr32(hw, I40E_PRTTSYN_CTL1, regval);
+
+       return 0;
+}
+
+/**
+ * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Respond to the user filter requests and make the appropriate hardware
+ * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
+ * logic, so keep track in software of whether to indicate these timestamps
+ * or not.
+ *
+ * It is permissible to "upgrade" the user request to a broader filter, as long
+ * as the user receives the timestamps they care about and the user is notified
+ * the filter has been broadened.
+ **/
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+       struct hwtstamp_config config;
+       int err;
+
+       if (!(pf->flags & I40E_FLAG_PTP))
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       err = i40e_ptp_set_timestamp_mode(pf, &config);
+       if (err)
+               return err;
+
+       /* save these settings for future reference */
+       pf->tstamp_config = config;
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+               -EFAULT : 0;
+}
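
The set path above is driven through the standard SIOCSHWTSTAMP net_tstamp interface; on return, the config structure reflects any filter broadening the driver performed. A userspace sketch ("eth0" is a placeholder interface name):

    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct hwtstamp_config cfg = { 0 };
        struct ifreq ifr = { 0 };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
            perror("SIOCSHWTSTAMP");
        else /* i40e upgrades V2_L4_SYNC to the broader V2_EVENT filter */
            printf("granted rx_filter=%d\n", (int)cfg.rx_filter);
        close(fd);
        return 0;
    }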
+
+/**
+ * i40e_ptp_create_clock - Create PTP clock device for userspace
+ * @pf: Board private structure
+ *
+ * This function creates a new PTP clock device. It only creates one if we
+ * don't already have one, so it is safe to call repeatedly. It returns an
+ * error if it can't create one, and success if a device already exists.
+ * i40e_ptp_init should use it to create the clock initially; it also prevents
+ * global resets from creating duplicate clock devices.
+ **/
+static long i40e_ptp_create_clock(struct i40e_pf *pf)
+{
+       /* no need to create a clock device if we already have one */
+       if (!IS_ERR_OR_NULL(pf->ptp_clock))
+               return 0;
+
+       strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name));
+       pf->ptp_caps.owner = THIS_MODULE;
+       pf->ptp_caps.max_adj = 999999999;
+       pf->ptp_caps.n_ext_ts = 0;
+       pf->ptp_caps.pps = 0;
+       pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
+       pf->ptp_caps.adjtime = i40e_ptp_adjtime;
+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
+       pf->ptp_caps.gettime64 = i40e_ptp_gettime64;
+       pf->ptp_caps.settime64 = i40e_ptp_settime64;
+#else
+       pf->ptp_caps.gettime = i40e_ptp_gettime;
+       pf->ptp_caps.settime = i40e_ptp_settime;
+#endif
+       pf->ptp_caps.enable = i40e_ptp_feature_enable;
+
+       /* Attempt to register the clock before enabling the hardware. */
+       pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
+       if (IS_ERR(pf->ptp_clock))
+               return PTR_ERR(pf->ptp_clock);
+
+       /* clear the hwtstamp settings here during clock create, instead of
+        * during regular init, so that we can maintain settings across a
+        * reset or suspend.
+        */
+       pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+       pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+       return 0;
+}
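
Once ptp_clock_register() succeeds here, the clock surfaces as /dev/ptpN and as a dynamic POSIX clock id. A minimal read sketch, again assuming /dev/ptp0 and the testptp-style FD_TO_CLOCKID macro:

    #include <fcntl.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define FD_TO_CLOCKID(fd) ((clockid_t)((((unsigned int)~(fd)) << 3) | 3))

    int main(void)
    {
        struct timespec ts;
        int fd = open("/dev/ptp0", O_RDONLY);

        if (fd < 0) {
            perror("open /dev/ptp0");
            return 1;
        }
        if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
            printf("PHC time: %lld.%09ld\n",
                   (long long)ts.tv_sec, ts.tv_nsec);
        close(fd);
        return 0;
    }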
+
+/**
+ * i40e_ptp_init - Initialize the 1588 support after device probe or reset
+ * @pf: Board private structure
+ *
+ * This function sets the device up for 1588 support. The first time it is
+ * run, it will create a PHC clock device. It does not create a clock device
+ * if one already exists, and it also reconfigures the device after a reset.
+ **/
+void i40e_ptp_init(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 pf_id;
+       long err;
+
+       /* Only one PF is assigned to control 1588 logic per port. Do not
+        * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID
+        */
+       pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
+               I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+       if (hw->pf_id != pf_id) {
+               pf->flags &= ~I40E_FLAG_PTP;
+               dev_info(&pf->pdev->dev, "PTP not supported on this device\n");
+               return;
+       }
+
+       /* we have to initialize the spinlock first, since we can't control
+        * when the user will enter the PHC device entry points
+        */
+       spin_lock_init(&pf->tmreg_lock);
+
+       /* ensure we have a clock device */
+       err = i40e_ptp_create_clock(pf);
+       if (err) {
+               pf->ptp_clock = NULL;
+               dev_err(&pf->pdev->dev,
+                       "PTP clock register failed: %ld\n", err);
+       } else {
+               struct timespec64 ts;
+               u32 regval;
+
+               if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+                       dev_info(&pf->pdev->dev, "PHC enabled\n");
+               pf->flags |= I40E_FLAG_PTP;
+
+               /* Ensure the clocks are running. */
+               regval = rd32(hw, I40E_PRTTSYN_CTL0);
+               regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK;
+               wr32(hw, I40E_PRTTSYN_CTL0, regval);
+               regval = rd32(hw, I40E_PRTTSYN_CTL1);
+               regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+               wr32(hw, I40E_PRTTSYN_CTL1, regval);
+
+               /* Set the increment value per clock tick. */
+               i40e_ptp_set_increment(pf);
+
+               /* reset timestamping mode */
+               i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
+
+               /* Set the clock value. */
+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
+               ts = ktime_to_timespec64(ktime_get_real());
+               i40e_ptp_settime64(&pf->ptp_caps, &ts);
+#else
+               ts = ktime_to_timespec(ktime_get_real());
+               i40e_ptp_settime(&pf->ptp_caps, &ts);
+#endif
+       }
+}
+
+/**
+ * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC
+ * @pf: Board private structure
+ *
+ * This function handles the cleanup work required after initialization by
+ * clearing out the important information and unregistering the PHC.
+ **/
+void i40e_ptp_stop(struct i40e_pf *pf)
+{
+       pf->flags &= ~I40E_FLAG_PTP;
+       pf->ptp_tx = false;
+       pf->ptp_rx = false;
+
+       if (pf->ptp_tx_skb) {
+               dev_kfree_skb_any(pf->ptp_tx_skb);
+               pf->ptp_tx_skb = NULL;
+       }
+
+       if (pf->ptp_clock) {
+               ptp_clock_unregister(pf->ptp_clock);
+               pf->ptp_clock = NULL;
+               dev_info(&pf->pdev->dev, "removed PHC from %s\n",
+                        pf->vsi[pf->lan_vsi]->netdev->name);
+       }
+}
+#endif /* HAVE_PTP_1588_CLOCK */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_register.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_register.h
new file mode 100644 (file)
index 0000000..00abf0f
--- /dev/null
@@ -0,0 +1,3376 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205C /* Reset: CORER */
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200C /* Reset: CORER */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0C00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
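/* [Editor's sketch, not part of the patch] GLHMC_SDPART packs a function's
 * segment-descriptor partition into one word: a 12-bit base and a 13-bit
 * size, decoded with the usual mask-then-shift idiom:
 */
static inline void i40e_read_sd_partition(struct i40e_hw *hw, int fn,
					  u32 *base, u32 *size)
{
	u32 val = rd32(hw, I40E_GLHMC_SDPART(fn));

	*base = (val & I40E_GLHMC_SDPART_PMSDBASE_MASK) >>
		I40E_GLHMC_SDPART_PMSDBASE_SHIFT;
	*size = (val & I40E_GLHMC_SDPART_PMSDSIZE_MASK) >>
		I40E_GLHMC_SDPART_PMSDSIZE_SHIFT;
}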
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
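/* [Editor's sketch, not part of the patch] The PFHMC_SD* trio acts as a
 * small command interface: the 64-bit descriptor goes into
 * SDDATAHIGH/SDDATALOW, then SDCMD latches it at an SD index. A simplified
 * illustration of what the driver's HMC code (see i40e_hmc.h in this
 * commit) does; the type encoding (1 = direct, 0 = paged) is an assumption
 * here:
 */
static inline void i40e_set_sd_entry(struct i40e_hw *hw, u64 pa,
				     u32 sd_index, u32 page_count,
				     bool direct)
{
	u32 low = ((u32)pa & I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK) |
		  (page_count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
		  (direct ? I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK : 0) |
		  I40E_PFHMC_SDDATALOW_PMSDVALID_MASK;

	wr32(hw, I40E_PFHMC_SDDATAHIGH, (u32)(pa >> 32));
	wr32(hw, I40E_PFHMC_SDDATALOW, low);
	wr32(hw, I40E_PFHMC_SDCMD, I40E_PFHMC_SDCMD_PMSDWR_MASK | sd_index);
}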
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
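/* [Editor's sketch, not part of the patch] Vector 0 uses PFINT_DYN_CTL0 and
 * vectors 1..N use PFINT_DYN_CTLN(vector - 1); re-arming a queue vector is
 * one write setting INTENA + CLEARPBA with ITR_INDX = 3 ("no ITR update",
 * an assumed encoding outside the 0..2 ITR set):
 */
static inline void i40e_irq_dynamic_enable(struct i40e_hw *hw, int vector)
{
	u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
		  I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		  (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
}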
+#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
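/* [Editor's sketch, not part of the patch] PFINT_ICR0 latches the
 * miscellaneous causes (clear-on-read, per the datasheet) and
 * PFINT_ICR0_ENA gates which of them may raise the interrupt; a handler
 * reads the causes once and dispatches on the bits defined above:
 */
static inline void i40e_handle_misc_irq(struct i40e_hw *hw)
{
	u32 icr0 = rd32(hw, I40E_PFINT_ICR0);	/* read clears the causes */

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		/* service the admin queue */
	}
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		/* walk GLGEN_VFLRSTAT for VFs needing reset */
	}
	if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
		/* refresh link status */
	}
}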
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
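/* [Editor's sketch, not part of the patch] The ITR registers form a
 * [3][512] grid: _i selects which throttle (Rx/Tx/other) and _INTPF the
 * vector, again offset by one because vector 0 uses ITR0. The 12-bit
 * interval is in hardware ticks (2 usec units on this family, stated here
 * as an assumption):
 */
static inline void i40e_write_itr(struct i40e_hw *hw, int itr_idx,
				  int vector, u32 interval)
{
	wr32(hw, I40E_PFINT_ITRN(itr_idx, vector - 1),
	     interval & I40E_PFINT_ITRN_INTERVAL_MASK);
}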
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
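/* [Editor's sketch, not part of the patch] RQCTL/TQCTL tie each queue to a
 * vector and chain queues into the per-vector linked list started by
 * PFINT_LNKLSTN. A rough sketch of mapping an Rx queue, assuming the
 * "next queue is a Tx queue" type encoding of 1:
 */
static inline void i40e_map_rxq(struct i40e_hw *hw, u32 q, u32 vector,
				u32 next_txq)
{
	u32 val = (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		  (next_txq << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		  (1 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		  I40E_QINT_RQCTL_CAUSE_ENA_MASK;

	wr32(hw, I40E_QINT_RQCTL(q), val);
}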
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: CORER */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
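/* [Editor's sketch, not part of the patch] PFLAN_QALLOC reports the PF's
 * contiguous LAN queue range; VALID says whether the range is usable:
 */
static inline bool i40e_pf_queue_range(struct i40e_hw *hw,
				       u32 *first, u32 *last)
{
	u32 val = rd32(hw, I40E_PFLAN_QALLOC);

	*first = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	*last = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
		I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	return !!(val & I40E_PFLAN_QALLOC_VALID_MASK);
}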
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: PFR */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
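/* [Editor's sketch, not part of the patch] The tail registers are the
 * producer indices the driver bumps after posting descriptors; ordering
 * against the descriptor writes is the caller's job (wr32() maps to
 * writel() in i40e_osdep.h):
 */
static inline void i40e_bump_rx_tail(struct i40e_hw *hw, u32 q, u32 tail)
{
	wr32(hw, I40E_QRX_TAIL(q), tail & I40E_QRX_TAIL_TAIL_MASK);
}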
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
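/* [Editor's sketch, not part of the patch] PRTGL_SAH/SAL hold the port's
 * station address for flow control, with the max frame size sharing SAH's
 * upper half:
 */
static inline u32 i40e_port_max_frame_size(struct i40e_hw *hw)
{
	u32 val = rd32(hw, I40E_PRTGL_SAH);

	return (val & I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
}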
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
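
The I40E_MSIX_T* block above mirrors the standard PCI MSI-X table layout: a 16-byte entry per vector carrying the lower/upper message address (TADD/TUADD), message data (TMSG), and vector control (TVCTRL), whose bit 0 is the per-vector mask bit, with I40E_MSIX_PBA holding the pending bits. A hedged sketch of masking one vector through these definitions — i40e_msix_set_masked() is a hypothetical name, and it assumes the table is reachable through the same mapping rd32()/wr32() use:

/* Illustrative only. Mask or unmask MSI-X vector 'vec' via its
 * vector-control word (bit 0 = mask, per the PCI MSI-X spec).
 * Assumes rd32()/wr32() can reach the MSI-X table mapping.
 */
static void i40e_msix_set_masked(struct i40e_hw *hw, int vec, bool masked)
{
	u32 ctrl = rd32(hw, I40E_MSIX_TVCTRL(vec));

	if (masked)
		ctrl |= I40E_MSIX_TVCTRL_MASK_MASK;
	else
		ctrl &= ~I40E_MSIX_TVCTRL_MASK_MASK;
	wr32(hw, I40E_MSIX_TVCTRL(vec), ctrl);
}
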
+#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
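
GLNVM_SRCTL and GLNVM_SRDATA form the shadow-RAM access pair: software writes the word offset into ADDR and sets START, polls DONE, then pulls the 16-bit result out of SRDATA.RDDATA (WRDATA carries the outbound word when the WRITE bit is set). A hedged sketch of the read sequence, loosely following the shape of i40e_read_nvm_word_srctl() in the i40e driver's i40e_nvm.c — the helper name, poll budget, and udelay() pacing are assumptions, and the in-tree code additionally waits for any prior access to complete before starting:

/* Illustrative only. Read one 16-bit word from the NVM shadow RAM. */
#define SR_DONE_RETRIES	100000	/* assumed poll budget */

static int i40e_sr_word_read(struct i40e_hw *hw, u16 offset, u16 *data)
{
	u32 sr_reg;
	int i;

	/* Kick off the read: word address plus the START bit. */
	sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
		 ((u32)1 << I40E_GLNVM_SRCTL_START_SHIFT);
	wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

	/* Poll DONE, then fetch the word from SRDATA.RDDATA. */
	for (i = 0; i < SR_DONE_RETRIES; i++) {
		sr_reg = rd32(hw, I40E_GLNVM_SRCTL);
		if (sr_reg & I40E_GLNVM_SRCTL_DONE_MASK) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK) >>
				      I40E_GLNVM_SRDATA_RDDATA_SHIFT);
			return 0;
		}
		udelay(5);
	}
	return -EIO;	/* timed out */
}
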
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
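
PRTPM_EEE_STAT reports the port's live Energy-Efficient-Ethernet state (EEE_NEG plus the RX/TX LPI status bits), while PRTPM_RLPIC and PRTPM_TLPIC count low-power-idle events in each direction. A hedged sketch of snapshotting all three — the holder struct and function name are hypothetical, rd32() as above:

/* Illustrative only. Snapshot the port's EEE/LPI state and counters. */
struct eee_snapshot {			/* hypothetical holder type */
	bool eee_negotiated;
	bool rx_lpi, tx_lpi;
	u32 rx_lpi_count, tx_lpi_count;
};

static void i40e_eee_snapshot(struct i40e_hw *hw, struct eee_snapshot *s)
{
	u32 stat = rd32(hw, I40E_PRTPM_EEE_STAT);

	s->eee_negotiated = !!(stat & I40E_PRTPM_EEE_STAT_EEE_NEG_MASK);
	s->rx_lpi = !!(stat & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK);
	s->tx_lpi = !!(stat & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK);
	s->rx_lpi_count = rd32(hw, I40E_PRTPM_RLPIC);
	s->tx_lpi_count = rd32(hw, I40E_PRTPM_TLPIC);
}
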
+#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
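
PFQF_FDSTAT splits this PF's Flow Director occupancy into two 13-bit counters: filters held in guaranteed space and filters in shared best-effort space (GLQF_FDCNT_0 above exposes the same pair device-wide). A hedged sketch of reading both with the generic extract idiom — the function name is hypothetical, rd32() as above:

/* Illustrative only. Report this PF's programmed Flow Director
 * filters, split into guaranteed and best-effort space.
 */
static void i40e_fd_counts(struct i40e_hw *hw, u32 *guarant, u32 *best)
{
	u32 val = rd32(hw, I40E_PFQF_FDSTAT);

	*guarant = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
		   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT;
	*best = (val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		I40E_PFQF_FDSTAT_BEST_CNT_SHIFT;
}
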
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX 143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX 143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
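Every field in this header comes as a SHIFT/MASK pair built by I40E_MASK(), so decoding any register follows one pattern. As a hedged sketch (modeled on the driver's malicious-driver-detection handling elsewhere in this series; rd32()/wr32() are the driver's own register accessors, and the surrounding context is assumed), a latched Tx MDD event would be decoded as:

	u32 reg = rd32(hw, I40E_GL_MDET_TX);

	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
			    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
			    I40E_GL_MDET_TX_QUEUE_SHIFT;

		/* report the offending PF/queue, then clear the latch */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
	}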
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_status.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_status.h
new file mode 100644 (file)
index 0000000..9e1f25c
--- /dev/null
@@ -0,0 +1,100 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+       I40E_SUCCESS                            = 0,
+       I40E_ERR_NVM                            = -1,
+       I40E_ERR_NVM_CHECKSUM                   = -2,
+       I40E_ERR_PHY                            = -3,
+       I40E_ERR_CONFIG                         = -4,
+       I40E_ERR_PARAM                          = -5,
+       I40E_ERR_MAC_TYPE                       = -6,
+       I40E_ERR_UNKNOWN_PHY                    = -7,
+       I40E_ERR_LINK_SETUP                     = -8,
+       I40E_ERR_ADAPTER_STOPPED                = -9,
+       I40E_ERR_INVALID_MAC_ADDR               = -10,
+       I40E_ERR_DEVICE_NOT_SUPPORTED           = -11,
+       I40E_ERR_MASTER_REQUESTS_PENDING        = -12,
+       I40E_ERR_INVALID_LINK_SETTINGS          = -13,
+       I40E_ERR_AUTONEG_NOT_COMPLETE           = -14,
+       I40E_ERR_RESET_FAILED                   = -15,
+       I40E_ERR_SWFW_SYNC                      = -16,
+       I40E_ERR_NO_AVAILABLE_VSI               = -17,
+       I40E_ERR_NO_MEMORY                      = -18,
+       I40E_ERR_BAD_PTR                        = -19,
+       I40E_ERR_RING_FULL                      = -20,
+       I40E_ERR_INVALID_PD_ID                  = -21,
+       I40E_ERR_INVALID_QP_ID                  = -22,
+       I40E_ERR_INVALID_CQ_ID                  = -23,
+       I40E_ERR_INVALID_CEQ_ID                 = -24,
+       I40E_ERR_INVALID_AEQ_ID                 = -25,
+       I40E_ERR_INVALID_SIZE                   = -26,
+       I40E_ERR_INVALID_ARP_INDEX              = -27,
+       I40E_ERR_INVALID_FPM_FUNC_ID            = -28,
+       I40E_ERR_QP_INVALID_MSG_SIZE            = -29,
+       I40E_ERR_QP_TOOMANY_WRS_POSTED          = -30,
+       I40E_ERR_INVALID_FRAG_COUNT             = -31,
+       I40E_ERR_QUEUE_EMPTY                    = -32,
+       I40E_ERR_INVALID_ALIGNMENT              = -33,
+       I40E_ERR_FLUSHED_QUEUE                  = -34,
+       I40E_ERR_INVALID_PUSH_PAGE_INDEX        = -35,
+       I40E_ERR_INVALID_IMM_DATA_SIZE          = -36,
+       I40E_ERR_TIMEOUT                        = -37,
+       I40E_ERR_OPCODE_MISMATCH                = -38,
+       I40E_ERR_CQP_COMPL_ERROR                = -39,
+       I40E_ERR_INVALID_VF_ID                  = -40,
+       I40E_ERR_INVALID_HMCFN_ID               = -41,
+       I40E_ERR_BACKING_PAGE_ERROR             = -42,
+       I40E_ERR_NO_PBLCHUNKS_AVAILABLE         = -43,
+       I40E_ERR_INVALID_PBLE_INDEX             = -44,
+       I40E_ERR_INVALID_SD_INDEX               = -45,
+       I40E_ERR_INVALID_PAGE_DESC_INDEX        = -46,
+       I40E_ERR_INVALID_SD_TYPE                = -47,
+       I40E_ERR_MEMCPY_FAILED                  = -48,
+       I40E_ERR_INVALID_HMC_OBJ_INDEX          = -49,
+       I40E_ERR_INVALID_HMC_OBJ_COUNT          = -50,
+       I40E_ERR_INVALID_SRQ_ARM_LIMIT          = -51,
+       I40E_ERR_SRQ_ENABLED                    = -52,
+       I40E_ERR_ADMIN_QUEUE_ERROR              = -53,
+       I40E_ERR_ADMIN_QUEUE_TIMEOUT            = -54,
+       I40E_ERR_BUF_TOO_SHORT                  = -55,
+       I40E_ERR_ADMIN_QUEUE_FULL               = -56,
+       I40E_ERR_ADMIN_QUEUE_NO_WORK            = -57,
+       I40E_ERR_BAD_IWARP_CQE                  = -58,
+       I40E_ERR_NVM_BLANK_MODE                 = -59,
+       I40E_ERR_NOT_IMPLEMENTED                = -60,
+       I40E_ERR_PE_DOORBELL_NOT_ENABLED        = -61,
+       I40E_ERR_DIAG_TEST_FAILED               = -62,
+       I40E_ERR_NOT_READY                      = -63,
+       I40E_NOT_SUPPORTED                      = -64,
+       I40E_ERR_FIRMWARE_API_VERSION           = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
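All codes are negative so a caller can test the return value directly against zero (I40E_SUCCESS). A minimal caller-side sketch (the admin-queue call and dev pointer below are placeholders, not specific driver symbols):

	enum i40e_status_code status;

	status = some_i40e_aq_call(hw);	/* hypothetical call */
	if (status != I40E_SUCCESS) {
		dev_err(dev, "AQ call failed, i40e status %d\n", status);
		return -EIO;	/* translate to a kernel errno for the stack */
	}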
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_txrx.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_txrx.c
new file mode 100644 (file)
index 0000000..0ecb43d
--- /dev/null
@@ -0,0 +1,3090 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include <linux/prefetch.h>
+#include "i40e.h"
+#include "i40e_prototype.h"
+
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+                               u32 td_tag)
+{
+       return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+                          ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
+                          ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+                          ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+                          ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
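+/* Illustrative example (annotation, not part of the original source): a
+ * plain data descriptor for a 1514-byte buffer with EOP and RS set, no
+ * offsets and no VLAN tag, would be built as
+ *
+ *	build_ctob(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS, 0, 1514, 0);
+ *
+ * i.e. the DATA dtype ORed with the command, header offsets, buffer size
+ * and L2 tag, each shifted into one little-endian 64-bit descriptor word.
+ */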
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#define I40E_FD_CLEAN_DELAY 10
+/**
+ * i40e_program_fdir_filter - Program a Flow Director filter
+ * @fdir_data: Flow Director filter parameters to be programmed
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @pf: The PF pointer
+ * @add: True for add/update, False for remove
+ **/
+int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
+                            struct i40e_pf *pf, bool add)
+{
+       struct i40e_filter_program_desc *fdir_desc;
+       struct i40e_tx_buffer *tx_buf, *first;
+       struct i40e_tx_desc *tx_desc;
+       struct i40e_ring *tx_ring;
+       unsigned int fpt, dcc;
+       struct i40e_vsi *vsi;
+       struct device *dev;
+       dma_addr_t dma;
+       u32 td_cmd = 0;
+       u16 delay = 0;
+       u16 i;
+
+       /* find existing FDIR VSI */
+       vsi = NULL;
+       for (i = 0; i < pf->num_alloc_vsi; i++)
+               if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+                       vsi = pf->vsi[i];
+       if (!vsi)
+               return -ENOENT;
+
+       tx_ring = vsi->tx_rings[0];
+       dev = tx_ring->dev;
+
+       /* we need two descriptors to add/del a filter and we can wait */
+       do {
+               if (I40E_DESC_UNUSED(tx_ring) > 1)
+                       break;
+               msleep_interruptible(1);
+               delay++;
+       } while (delay < I40E_FD_CLEAN_DELAY);
+
+       if (!(I40E_DESC_UNUSED(tx_ring) > 1))
+               return -EAGAIN;
+
+       dma = dma_map_single(dev, raw_packet,
+                            I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, dma))
+               goto dma_fail;
+
+       /* grab the next descriptor */
+       i = tx_ring->next_to_use;
+       fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+       first = &tx_ring->tx_bi[i];
+       memset(first, 0, sizeof(struct i40e_tx_buffer));
+
+       tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
+
+       fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+             I40E_TXD_FLTR_QW0_QINDEX_MASK;
+
+       fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+              I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
+
+       fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+              I40E_TXD_FLTR_QW0_PCTYPE_MASK;
+
+       /* Use LAN VSI Id if not programmed by user */
+       if (fdir_data->dest_vsi == 0)
+               fpt |= (pf->vsi[pf->lan_vsi]->id) <<
+                      I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
+       else
+               fpt |= ((u32)fdir_data->dest_vsi <<
+                       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+                      I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
+
+       dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+       if (add)
+               dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+                      I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+       else
+               dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+                      I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+
+       dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+              I40E_TXD_FLTR_QW1_DEST_MASK;
+
+       dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+              I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
+
+       if (fdir_data->cnt_index != 0) {
+               dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+               dcc |= ((u32)fdir_data->cnt_index <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+       }
+
+       fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
+       fdir_desc->rsvd = cpu_to_le32(0);
+       fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
+       fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
+
+       /* Now program a dummy descriptor */
+       i = tx_ring->next_to_use;
+       tx_desc = I40E_TX_DESC(tx_ring, i);
+       tx_buf = &tx_ring->tx_bi[i];
+
+       tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
+
+       memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
+
+       /* record length, and DMA address */
+       dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
+       dma_unmap_addr_set(tx_buf, dma, dma);
+
+       tx_desc->buffer_addr = cpu_to_le64(dma);
+       td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
+
+       tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
+       tx_buf->raw_buf = (void *)raw_packet;
+
+       tx_desc->cmd_type_offset_bsz =
+               build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
+
+       /* Force memory writes to complete before letting h/w
+        * know there are new descriptors to fetch.
+        */
+       wmb();
+
+       /* Mark the data descriptor to be watched */
+       first->next_to_watch = tx_desc;
+
+       writel(tx_ring->next_to_use, tx_ring->tail);
+       return 0;
+
+dma_fail:
+       return -1;
+}
+
+#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
+/**
+ * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
+                                  struct i40e_fdir_filter *fd_data,
+                                  bool add)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct udphdr *udp;
+       struct iphdr *ip;
+       bool err = false;
+       u8 *raw_packet;
+       int ret;
+       static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+               0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+       raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+       if (!raw_packet)
+               return -ENOMEM;
+       memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
+
+       ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+       udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
+             + sizeof(struct iphdr));
+
+       ip->daddr = fd_data->dst_ip[0];
+       udp->dest = fd_data->dst_port;
+       ip->saddr = fd_data->src_ip[0];
+       udp->source = fd_data->src_port;
+
+       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+       ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
+                        fd_data->pctype, fd_data->fd_id, ret);
+               err = true;
+       } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
+               if (add)
+                       dev_info(&pf->pdev->dev,
+                                "Filter OK for PCTYPE %d loc = %d\n",
+                                fd_data->pctype, fd_data->fd_id);
+               else
+                       dev_info(&pf->pdev->dev,
+                                "Filter deleted for PCTYPE %d loc = %d\n",
+                                fd_data->pctype, fd_data->fd_id);
+       }
+       return err ? -EOPNOTSUPP : 0;
+}
+
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
+/**
+ * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
+                                  struct i40e_fdir_filter *fd_data,
+                                  bool add)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct tcphdr *tcp;
+       struct iphdr *ip;
+       bool err = false;
+       u8 *raw_packet;
+       int ret;
+       /* Dummy packet */
+       static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+               0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
+               0x0, 0x72, 0, 0, 0, 0};
+
+       raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+       if (!raw_packet)
+               return -ENOMEM;
+       memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
+
+       ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+       tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
+             + sizeof(struct iphdr));
+
+       ip->daddr = fd_data->dst_ip[0];
+       tcp->dest = fd_data->dst_port;
+       ip->saddr = fd_data->src_ip[0];
+       tcp->source = fd_data->src_port;
+
+       if (add) {
+               pf->fd_tcp_rule++;
+               if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+                       pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+               }
+       } else {
+               pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
+                                 (pf->fd_tcp_rule - 1) : 0;
+               if (pf->fd_tcp_rule == 0) {
+                       pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+               }
+       }
+
+       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+       ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
+                        fd_data->pctype, fd_data->fd_id, ret);
+               err = true;
+       } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
+               if (add)
+                       dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
+                                fd_data->pctype, fd_data->fd_id);
+               else
+                       dev_info(&pf->pdev->dev,
+                                "Filter deleted for PCTYPE %d loc = %d\n",
+                                fd_data->pctype, fd_data->fd_id);
+       }
+
+       return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+                                   struct i40e_fdir_filter *fd_data,
+                                   bool add)
+{
+       return -EOPNOTSUPP;
+}
+
+#define I40E_IP_DUMMY_PACKET_LEN 34
+/**
+ * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
+                                 struct i40e_fdir_filter *fd_data,
+                                 bool add)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct iphdr *ip;
+       bool err = false;
+       u8 *raw_packet;
+       int ret;
+       int i;
+       static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+               0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0};
+
+       for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+            i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+               raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+               if (!raw_packet)
+                       return -ENOMEM;
+               memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
+               ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+
+               ip->saddr = fd_data->src_ip[0];
+               ip->daddr = fd_data->dst_ip[0];
+               ip->protocol = 0;
+
+               fd_data->pctype = i;
+               ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
+                                fd_data->pctype, fd_data->fd_id, ret);
+                       err = true;
+               } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
+                       if (add)
+                               dev_info(&pf->pdev->dev,
+                                        "Filter OK for PCTYPE %d loc = %d\n",
+                                        fd_data->pctype, fd_data->fd_id);
+                       else
+                               dev_info(&pf->pdev->dev,
+                                        "Filter deleted for PCTYPE %d loc = %d\n",
+                                        fd_data->pctype, fd_data->fd_id);
+               }
+       }
+
+       return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir - Build raw packets to add/del fdir filter
+ * @vsi: pointer to the targeted VSI
+ * @input: Flow Director filter data to add or delete
+ * @add: true adds a filter, false removes it
+ *
+ **/
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+                     struct i40e_fdir_filter *input, bool add)
+{
+       struct i40e_pf *pf = vsi->back;
+       int ret;
+
+       switch (input->flow_type & ~FLOW_EXT) {
+       case TCP_V4_FLOW:
+               ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
+               break;
+       case UDP_V4_FLOW:
+               ret = i40e_add_del_fdir_udpv4(vsi, input, add);
+               break;
+       case SCTP_V4_FLOW:
+               ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+               break;
+       case IPV4_FLOW:
+               ret = i40e_add_del_fdir_ipv4(vsi, input, add);
+               break;
+       case IP_USER_FLOW:
+               switch (input->ip4_proto) {
+               case IPPROTO_TCP:
+                       ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
+                       break;
+               case IPPROTO_UDP:
+                       ret = i40e_add_del_fdir_udpv4(vsi, input, add);
+                       break;
+               case IPPROTO_SCTP:
+                       ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+                       break;
+               default:
+                       ret = i40e_add_del_fdir_ipv4(vsi, input, add);
+                       break;
+               }
+               break;
+       default:
+               dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
+                        input->flow_type);
+               ret = -EINVAL;
+       }
+
+       /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
+       return ret;
+}
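+/* Usage note (illustrative annotation): this dispatch backs the ethtool
+ * ntuple interface; for example
+ *
+ *	ethtool -N <ifname> flow-type tcp4 src-ip 192.168.0.1 action 2
+ *
+ * arrives here as TCP_V4_FLOW with add == true.
+ */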
+
+/**
+ * i40e_fd_handle_status - check the Programming Status for FD
+ * @rx_ring: the Rx ring for this descriptor
+ * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
+ * @prog_id: the id originally used for programming
+ *
+ * This is used to verify if the FD programming or invalidation
+ * requested by SW to the HW is successful or not and take actions accordingly.
+ **/
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
+                                 union i40e_rx_desc *rx_desc, u8 prog_id)
+{
+       struct i40e_pf *pf = rx_ring->vsi->back;
+       struct pci_dev *pdev = pf->pdev;
+       u32 fcnt_prog, fcnt_avail;
+       u32 error;
+       u64 qw;
+
+       qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+               I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+
+       if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+               pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
+               if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
+                   (I40E_DEBUG_FD & pf->hw.debug_mask))
+                       dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
+                                pf->fd_inv);
+
+               /* Check if the programming error is for ATR.
+                * If so, auto disable ATR and set a state for
+                * flush in progress. Next time we come here if flush is in
+                * progress do nothing, once flush is complete the state will
+                * be cleared.
+                */
+
+               if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+                       return;
+
+               pf->fd_add_err++;
+               /* store the current atr filter count */
+               pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
+
+               if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+                       pf->auto_disable_flags |=
+                                               I40E_FLAG_FD_ATR_ENABLED;
+                       set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+               }
+
+               /* filter programming failed most likely due to table full */
+               fcnt_prog = i40e_get_global_fd_count(pf);
+               fcnt_avail = pf->fdir_pf_filter_count;
+               /* If ATR is running fcnt_prog can quickly change,
+                * if we are very close to full, it makes sense to disable
+                * FD ATR/SB and then re-enable it when there is room.
+                */
+               if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
+                       if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+                         !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+                               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                       dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+                               pf->auto_disable_flags |=
+                                                       I40E_FLAG_FD_SB_ENABLED;
+                       }
+               }
+       } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                       dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
+                                rx_desc->wb.qword0.hi_dword.fd_id);
+       }
+}
+
+/**
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * @ring:      the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+                                           struct i40e_tx_buffer *tx_buffer)
+{
+       if (tx_buffer->skb) {
+               if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+                       kfree(tx_buffer->raw_buf);
+               else
+                       dev_kfree_skb_any(tx_buffer->skb);
+
+               if (dma_unmap_len(tx_buffer, len))
+                       dma_unmap_single(ring->dev,
+                                        dma_unmap_addr(tx_buffer, dma),
+                                        dma_unmap_len(tx_buffer, len),
+                                        DMA_TO_DEVICE);
+       } else if (dma_unmap_len(tx_buffer, len)) {
+               dma_unmap_page(ring->dev,
+                              dma_unmap_addr(tx_buffer, dma),
+                              dma_unmap_len(tx_buffer, len),
+                              DMA_TO_DEVICE);
+       }
+       tx_buffer->next_to_watch = NULL;
+       tx_buffer->skb = NULL;
+       dma_unmap_len_set(tx_buffer, len, 0);
+       /* tx_buffer must be completely set up in the transmit path */
+}
+
+/**
+ * i40e_clean_tx_ring - Free any empty Tx buffers
+ * @tx_ring: ring to be cleaned
+ **/
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+       unsigned long bi_size;
+       u16 i;
+
+       /* ring already cleared, nothing to do */
+       if (!tx_ring->tx_bi)
+               return;
+
+       /* Free all the Tx ring sk_buffs */
+       for (i = 0; i < tx_ring->count; i++)
+               i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+
+       bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+       memset(tx_ring->tx_bi, 0, bi_size);
+
+       /* Zero out the descriptor ring */
+       memset(tx_ring->desc, 0, tx_ring->size);
+
+       tx_ring->next_to_use = 0;
+       tx_ring->next_to_clean = 0;
+
+       if (!tx_ring->netdev)
+               return;
+
+       /* cleanup Tx queue statistics */
+       netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+                                                 tx_ring->queue_index));
+}
+
+/**
+ * i40e_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40e_free_tx_resources(struct i40e_ring *tx_ring)
+{
+       i40e_clean_tx_ring(tx_ring);
+       kfree(tx_ring->tx_bi);
+       tx_ring->tx_bi = NULL;
+
+       if (tx_ring->desc) {
+               dma_free_coherent(tx_ring->dev, tx_ring->size,
+                                 tx_ring->desc, tx_ring->dma);
+               tx_ring->desc = NULL;
+       }
+}
+
+/**
+ * i40e_get_tx_pending - how many tx descriptors not processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
+       u32 head, tail;
+
+       head = i40e_get_head(ring);
+       tail = readl(ring->tail);
+
+       if (head != tail)
+               return (head < tail) ?
+                       tail - head : (tail + ring->count - head);
+
+       return 0;
+}
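+/* Worked example (illustrative values): with count = 512, head = 500 and
+ * tail = 10 the ring has wrapped, so 10 + 512 - 500 = 22 descriptors are
+ * still pending; with head = 10 and tail = 500 it is simply 500 - 10 = 490.
+ */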
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+       u32 tx_done = tx_ring->stats.packets;
+       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+       u32 tx_pending = i40e_get_tx_pending(tx_ring);
+       struct i40e_pf *pf = tx_ring->vsi->back;
+       bool ret = false;
+
+       clear_check_for_tx_hang(tx_ring);
+
+       /* Check for a hung queue, but be thorough. This verifies
+        * that a transmit has been completed since the previous
+        * check AND there is at least one packet pending. The
+        * ARMED bit is set to indicate a potential hang. The
+        * bit is cleared if a pause frame is received to remove
+        * false hang detection due to PFC or 802.3x frames. By
+        * requiring this to fail twice we avoid races with
+        * PFC clearing the ARMED bit and conditions where we
+        * run the check_tx_hang logic with a transmit completion
+        * pending but without time to complete it yet.
+        */
+       if ((tx_done_old == tx_done) && tx_pending) {
+               /* make sure it is true for two checks in a row */
+               ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+                                      &tx_ring->state);
+       } else if (tx_done_old == tx_done &&
+                  (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
+               if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
+                       dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d\n",
+                                tx_pending, tx_ring->queue_index);
+               pf->tx_sluggish_count++;
+       } else {
+               /* update completed stats and disarm the hang check */
+               tx_ring->tx_stats.tx_done_old = tx_done;
+               clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+       }
+
+       return ret;
+}
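+/* Note (illustrative annotation): test_and_set_bit() returns the previous
+ * value of __I40E_HANG_CHECK_ARMED, so a stalled queue is reported as hung
+ * only on the second consecutive check; any completed transmit in between
+ * takes the else branch, which clears the bit and disarms the detector.
+ */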
+
+#define WB_STRIDE 0x3
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring:  tx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+       u16 i = tx_ring->next_to_clean;
+       struct i40e_tx_buffer *tx_buf;
+       struct i40e_tx_desc *tx_head;
+       struct i40e_tx_desc *tx_desc;
+       unsigned int total_packets = 0;
+       unsigned int total_bytes = 0;
+
+       tx_buf = &tx_ring->tx_bi[i];
+       tx_desc = I40E_TX_DESC(tx_ring, i);
+       i -= tx_ring->count;
+
+       tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
+
+       do {
+               struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+               /* if next_to_watch is not set then there is no work pending */
+               if (!eop_desc)
+                       break;
+
+               /* prevent any other reads prior to eop_desc */
+               read_barrier_depends();
+
+               /* we have caught up to head, no work left to do */
+               if (tx_head == tx_desc)
+                       break;
+
+               /* clear next_to_watch to prevent false hangs */
+               tx_buf->next_to_watch = NULL;
+
+               /* update the statistics for this packet */
+               total_bytes += tx_buf->bytecount;
+               total_packets += tx_buf->gso_segs;
+
+               /* free the skb */
+               dev_kfree_skb_any(tx_buf->skb);
+
+               /* unmap skb header data */
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buf, dma),
+                                dma_unmap_len(tx_buf, len),
+                                DMA_TO_DEVICE);
+
+               /* clear tx_buffer data */
+               tx_buf->skb = NULL;
+               dma_unmap_len_set(tx_buf, len, 0);
+
+               /* unmap remaining buffers */
+               while (tx_desc != eop_desc) {
+
+                       tx_buf++;
+                       tx_desc++;
+                       i++;
+                       if (unlikely(!i)) {
+                               i -= tx_ring->count;
+                               tx_buf = tx_ring->tx_bi;
+                               tx_desc = I40E_TX_DESC(tx_ring, 0);
+                       }
+
+                       /* unmap any remaining paged data */
+                       if (dma_unmap_len(tx_buf, len)) {
+                               dma_unmap_page(tx_ring->dev,
+                                              dma_unmap_addr(tx_buf, dma),
+                                              dma_unmap_len(tx_buf, len),
+                                              DMA_TO_DEVICE);
+                               dma_unmap_len_set(tx_buf, len, 0);
+                       }
+               }
+
+               /* move us one more past the eop_desc for start of next pkt */
+               tx_buf++;
+               tx_desc++;
+               i++;
+               if (unlikely(!i)) {
+                       i -= tx_ring->count;
+                       tx_buf = tx_ring->tx_bi;
+                       tx_desc = I40E_TX_DESC(tx_ring, 0);
+               }
+
+               prefetch(tx_desc);
+
+               /* update budget accounting */
+               budget--;
+       } while (likely(budget));
+
+       i += tx_ring->count;
+       tx_ring->next_to_clean = i;
+       u64_stats_update_begin(&tx_ring->syncp);
+       tx_ring->stats.bytes += total_bytes;
+       tx_ring->stats.packets += total_packets;
+       u64_stats_update_end(&tx_ring->syncp);
+       tx_ring->q_vector->tx.total_bytes += total_bytes;
+       tx_ring->q_vector->tx.total_packets += total_packets;
+
+       if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+               /* schedule immediate reset if we believe we hung */
+               dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+                        "  VSI                  <%d>\n"
+                        "  Tx Queue             <%d>\n"
+                        "  next_to_use          <%x>\n"
+                        "  next_to_clean        <%x>\n",
+                        tx_ring->vsi->seid,
+                        tx_ring->queue_index,
+                        tx_ring->next_to_use, i);
+
+               netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+               dev_info(tx_ring->dev,
+                        "tx hang detected on queue %d, reset requested\n",
+                        tx_ring->queue_index);
+
+               /* do not fire the reset immediately, wait for the stack to
+                * decide we are truly stuck, also prevents every queue from
+                * simultaneously requesting a reset
+                */
+
+               /* the adapter is about to reset, no point in enabling polling */
+               budget = 1;
+       }
+
+       netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+                                                     tx_ring->queue_index),
+                                 total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
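+       /* Threshold sketch: DESC_NEEDED is assumed here to be the
+        * worst-case descriptor count for a single frame; waking only when
+        * twice that many descriptors are free avoids an immediate
+        * re-stop of the queue.
+        */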
+       if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+                    (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+               /* Make sure that anybody stopping the queue after this
+                * sees the new next_to_clean.
+                */
+               smp_mb();
+               if (__netif_subqueue_stopped(tx_ring->netdev,
+                                            tx_ring->queue_index) &&
+                  !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+                       netif_wake_subqueue(tx_ring->netdev,
+                                           tx_ring->queue_index);
+                       ++tx_ring->tx_stats.restart_queue;
+               }
+       }
+
+       return !!budget;
+}
+
+/**
+ * i40e_force_wb - Arm hardware to do a writeback on non-cache-aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+       if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+               u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                         I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
+                         I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                         I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
+                         /* allow 00 to be written to the index */;
+
+               wr32(&vsi->back->hw,
+                    I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+                                        vsi->base_vector - 1),
+                    val);
+       } else {
+               u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+                         I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
+                         I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+                         I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK
+                         /* allow 00 to be written to the index */;
+
+               wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+
+       }
+}
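+
+/* Note: i40e_force_wb() is driven from i40e_napi_poll() below, which calls
+ * it when a cleaned Tx ring left ring->arm_wb set, i.e. descriptors are
+ * still pending a writeback the hardware has not flushed.
+ */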
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Returns true if itr changed, false if not
+ *
+ * Stores a new ITR value based on packet and byte counts during
+ * the last interrupt.  The advantage of per-interrupt computation
+ * is faster updates and a more accurate ITR for the current traffic
+ * pattern.  Constants in this function were computed from the
+ * theoretical maximum wire speed, and thresholds were set based on
+ * testing data, aiming to minimize response time while maximizing
+ * bulk throughput.
+ **/
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+       enum i40e_latency_range new_latency_range = rc->latency_range;
+       struct i40e_q_vector *qv = rc->ring->q_vector;
+       u32 new_itr = rc->itr;
+       int bytes_per_int;
+       int usecs;
+
+       if (rc->total_packets == 0 || !rc->itr)
+               return false;
+
+       /* simple throttle rate management
+        *   0-10MB/s   lowest (50000 ints/s)
+        *  10-20MB/s   low    (20000 ints/s)
+        *  20-1249MB/s bulk   (18000 ints/s)
+        *  > 40000 rx packets per second (8000 ints/s)
+        *
+        * The math works out because the divisor is in 10^(-6), which
+        * turns the bytes/usec input value into MB/s.  Be sure to work in
+        * usecs (the ITR register values are in 2-usec increments) and to
+        * use the smoothed values that the countdown timer gives us.
+        */
+       usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+       bytes_per_int = rc->total_bytes / usecs;
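+       /* Worked example (illustrative, assuming ITR_COUNTDOWN_START == 100
+        * and rc->itr == I40E_ITR_20K, i.e. 25 in 2-usec units): usecs =
+        * (25 << 1) * 100 = 5000.  100 KB received in that window gives
+        * bytes_per_int = 100000 / 5000 = 20, i.e. 20 MB/s, right at the
+        * low/bulk boundary below.
+        */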
+
+       switch (new_latency_range) {
+       case I40E_LOWEST_LATENCY:
+               if (bytes_per_int > 10)
+                       new_latency_range = I40E_LOW_LATENCY;
+               break;
+       case I40E_LOW_LATENCY:
+               if (bytes_per_int > 20)
+                       new_latency_range = I40E_BULK_LATENCY;
+               else if (bytes_per_int <= 10)
+                       new_latency_range = I40E_LOWEST_LATENCY;
+               break;
+       case I40E_BULK_LATENCY:
+       case I40E_ULTRA_LATENCY:
+       default:
+               if (bytes_per_int <= 20)
+                       new_latency_range = I40E_LOW_LATENCY;
+               break;
+       }
+
+       /* this is to adjust RX more aggressively when streaming small
+        * packets.  The value of 40000 was picked as it is just beyond
+        * what the hardware can receive per second if in low latency
+        * mode.
+        */
+#define RX_ULTRA_PACKET_RATE 40000
+
+       if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+           (&qv->rx == rc))
+               new_latency_range = I40E_ULTRA_LATENCY;
+
+       rc->latency_range = new_latency_range;
+
+       switch (new_latency_range) {
+       case I40E_LOWEST_LATENCY:
+               new_itr = I40E_ITR_50K;
+               break;
+       case I40E_LOW_LATENCY:
+               new_itr = I40E_ITR_20K;
+               break;
+       case I40E_BULK_LATENCY:
+               new_itr = I40E_ITR_18K;
+               break;
+       case I40E_ULTRA_LATENCY:
+               new_itr = I40E_ITR_8K;
+               break;
+       default:
+               break;
+       }
+
+       rc->total_bytes = 0;
+       rc->total_packets = 0;
+
+       if (new_itr != rc->itr) {
+               rc->itr = new_itr;
+               return true;
+       }
+
+       return false;
+}
+
+/**
+ * i40e_clean_programming_status - clean the programming status descriptor
+ * @rx_ring: the rx ring that has this descriptor
+ * @rx_desc: the rx descriptor written back by HW
+ *
+ * Flow Director handles FD_FILTER_STATUS to check whether its filter
+ * programming succeeded and takes action accordingly. FCoE handles its
+ * context/filter programming/invalidation status and takes action.
+ *
+ **/
+static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
+                                         union i40e_rx_desc *rx_desc)
+{
+       u64 qw;
+       u8 id;
+
+       qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+                 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+
+       if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
+               i40e_fd_handle_status(rx_ring, rx_desc, id);
+#ifdef I40E_FCOE
+       else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
+                (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
+               i40e_fcoe_handle_status(rx_ring, rx_desc, id);
+#endif
+}
+
+/**
+ * i40e_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+       struct device *dev = tx_ring->dev;
+       int bi_size;
+
+       if (!dev)
+               return -ENOMEM;
+
+       /* warn if we are about to overwrite the pointer */
+       WARN_ON(tx_ring->tx_bi);
+       bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+       tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+       if (!tx_ring->tx_bi)
+               goto err;
+
+       /* round up to nearest 4K */
+       tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+       /* add a u32 for head writeback; the alignment below then
+        * guarantees the ring is at least one cache line in size
+        */
+       tx_ring->size += sizeof(u32);
+       tx_ring->size = ALIGN(tx_ring->size, 4096);
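+       /* Sizing sketch (assuming 16-byte Tx descriptors): a 512-entry
+        * ring needs 512 * 16 + 4 = 8196 bytes, which ALIGN() rounds up
+        * to 12288, i.e. three 4K pages.
+        */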
+       tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+                                          &tx_ring->dma, GFP_KERNEL);
+       if (!tx_ring->desc) {
+               dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+                        tx_ring->size);
+               goto err;
+       }
+
+       tx_ring->next_to_use = 0;
+       tx_ring->next_to_clean = 0;
+       return 0;
+
+err:
+       kfree(tx_ring->tx_bi);
+       tx_ring->tx_bi = NULL;
+       return -ENOMEM;
+}
+
+/**
+ * i40e_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+       struct device *dev = rx_ring->dev;
+       struct i40e_rx_buffer *rx_bi;
+       unsigned long bi_size;
+       u16 i;
+
+       /* ring already cleared, nothing to do */
+       if (!rx_ring->rx_bi)
+               return;
+
+       if (ring_is_ps_enabled(rx_ring)) {
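+               /* In packet-split mode every hdr_buf points into a single
+                * coherent block allocated in i40e_alloc_rx_headers(), so
+                * freeing via entry 0 releases all header buffers at once.
+                */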
+               int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
+
+               rx_bi = &rx_ring->rx_bi[0];
+               if (rx_bi->hdr_buf) {
+                       dma_free_coherent(dev,
+                                         bufsz,
+                                         rx_bi->hdr_buf,
+                                         rx_bi->dma);
+                       for (i = 0; i < rx_ring->count; i++) {
+                               rx_bi = &rx_ring->rx_bi[i];
+                               rx_bi->dma = 0;
+                               rx_bi->hdr_buf = NULL;
+                       }
+               }
+       }
+       /* Free all the Rx ring sk_buffs */
+       for (i = 0; i < rx_ring->count; i++) {
+               rx_bi = &rx_ring->rx_bi[i];
+               if (rx_bi->dma) {
+                       dma_unmap_single(dev,
+                                        rx_bi->dma,
+                                        rx_ring->rx_buf_len,
+                                        DMA_FROM_DEVICE);
+                       rx_bi->dma = 0;
+               }
+               if (rx_bi->skb) {
+                       dev_kfree_skb(rx_bi->skb);
+                       rx_bi->skb = NULL;
+               }
+               if (rx_bi->page) {
+                       if (rx_bi->page_dma) {
+                               dma_unmap_page(dev,
+                                              rx_bi->page_dma,
+                                              PAGE_SIZE / 2,
+                                              DMA_FROM_DEVICE);
+                               rx_bi->page_dma = 0;
+                       }
+                       __free_page(rx_bi->page);
+                       rx_bi->page = NULL;
+                       rx_bi->page_offset = 0;
+               }
+       }
+
+       bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+       memset(rx_ring->rx_bi, 0, bi_size);
+
+       /* Zero out the descriptor ring */
+       memset(rx_ring->desc, 0, rx_ring->size);
+
+       rx_ring->next_to_clean = 0;
+       rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40e_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+{
+       i40e_clean_rx_ring(rx_ring);
+       kfree(rx_ring->rx_bi);
+       rx_ring->rx_bi = NULL;
+
+       if (rx_ring->desc) {
+               dma_free_coherent(rx_ring->dev, rx_ring->size,
+                                 rx_ring->desc, rx_ring->dma);
+               rx_ring->desc = NULL;
+       }
+}
+
+/**
+ * i40e_alloc_rx_headers - allocate rx header buffers
+ * @rx_ring: ring to alloc buffers
+ *
+ * Allocate rx header buffers for the entire ring. As these are static,
+ * this is only called when setting up a new ring.
+ **/
+void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
+{
+       struct device *dev = rx_ring->dev;
+       struct i40e_rx_buffer *rx_bi;
+       dma_addr_t dma;
+       void *buffer;
+       int buf_size;
+       int i;
+
+       if (rx_ring->rx_bi[0].hdr_buf)
+               return;
+       /* Make sure the buffers don't cross cache line boundaries. */
+       buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
+       buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
+                                   &dma, GFP_KERNEL);
+       if (!buffer)
+               return;
+       for (i = 0; i < rx_ring->count; i++) {
+               rx_bi = &rx_ring->rx_bi[i];
+               rx_bi->dma = dma + (i * buf_size);
+               rx_bi->hdr_buf = buffer + (i * buf_size);
+       }
+}
+
+/**
+ * i40e_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+       struct device *dev = rx_ring->dev;
+       int bi_size;
+
+       /* warn if we are about to overwrite the pointer */
+       WARN_ON(rx_ring->rx_bi);
+       bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+       rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+       if (!rx_ring->rx_bi)
+               goto err;
+#ifdef HAVE_NDO_GET_STATS64
+
+       u64_stats_init(&rx_ring->syncp);
+#endif /* HAVE_NDO_GET_STATS64 */
+
+       /* Round up to nearest 4K */
+       rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+               ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+               : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+       rx_ring->size = ALIGN(rx_ring->size, 4096);
+       rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+                                          &rx_ring->dma, GFP_KERNEL);
+
+       if (!rx_ring->desc) {
+               dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+                        rx_ring->size);
+               goto err;
+       }
+
+       rx_ring->next_to_clean = 0;
+       rx_ring->next_to_use = 0;
+
+       return 0;
+err:
+       kfree(rx_ring->rx_bi);
+       rx_ring->rx_bi = NULL;
+       return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new next_to_use value and bump the tail
+ * @rx_ring: ring to bump
+ * @val: new next_to_use index to write to the tail register
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+       rx_ring->next_to_use = val;
+       /* Force memory writes to complete before letting h/w
+        * know there are new descriptors to fetch.  (Only
+        * applicable for weak-ordered memory model archs,
+        * such as IA-64).
+        */
+       wmb();
+       writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+       u16 i = rx_ring->next_to_use;
+       union i40e_rx_desc *rx_desc;
+       struct i40e_rx_buffer *bi;
+
+       /* do nothing if no valid netdev defined */
+       if (!rx_ring->netdev || !cleaned_count)
+               return;
+
+       while (cleaned_count--) {
+               rx_desc = I40E_RX_DESC(rx_ring, i);
+               bi = &rx_ring->rx_bi[i];
+
+               if (bi->skb) /* desc is in use */
+                       goto no_buffers;
+               if (!bi->page) {
+                       bi->page = alloc_page(GFP_ATOMIC);
+                       if (!bi->page) {
+                               rx_ring->rx_stats.alloc_page_failed++;
+                               goto no_buffers;
+                       }
+               }
+
+               if (!bi->page_dma) {
+                       /* use a half page if we're re-using */
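+                       /* XOR-ing the offset flips between the two page
+                        * halves, leaving untouched the half the stack may
+                        * still hold a reference to (see the page_count()
+                        * check in i40e_clean_rx_irq_ps()).
+                        */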
+                       bi->page_offset ^= PAGE_SIZE / 2;
+                       bi->page_dma = dma_map_page(rx_ring->dev,
+                                                   bi->page,
+                                                   bi->page_offset,
+                                                   PAGE_SIZE / 2,
+                                                   DMA_FROM_DEVICE);
+                       if (dma_mapping_error(rx_ring->dev,
+                                             bi->page_dma)) {
+                               rx_ring->rx_stats.alloc_page_failed++;
+                               bi->page_dma = 0;
+                               goto no_buffers;
+                       }
+               }
+
+               dma_sync_single_range_for_device(rx_ring->dev,
+                                                bi->dma,
+                                                0,
+                                                rx_ring->rx_hdr_len,
+                                                DMA_FROM_DEVICE);
+               /* Refresh the desc even if buffer_addrs didn't change
+                * because each write-back erases this info.
+                */
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+               rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+               i++;
+               if (i == rx_ring->count)
+                       i = 0;
+       }
+
+no_buffers:
+       if (rx_ring->next_to_use != i)
+               i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+       u16 i = rx_ring->next_to_use;
+       union i40e_rx_desc *rx_desc;
+       struct i40e_rx_buffer *bi;
+       struct sk_buff *skb;
+
+       /* do nothing if no valid netdev defined */
+       if (!rx_ring->netdev || !cleaned_count)
+               return;
+
+       while (cleaned_count--) {
+               rx_desc = I40E_RX_DESC(rx_ring, i);
+               bi = &rx_ring->rx_bi[i];
+               skb = bi->skb;
+
+               if (!skb) {
+                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                                       rx_ring->rx_buf_len);
+                       if (!skb) {
+                               rx_ring->rx_stats.alloc_buff_failed++;
+                               goto no_buffers;
+                       }
+                       /* initialize queue mapping */
+                       skb_record_rx_queue(skb, rx_ring->queue_index);
+                       bi->skb = skb;
+               }
+
+               if (!bi->dma) {
+                       bi->dma = dma_map_single(rx_ring->dev,
+                                                skb->data,
+                                                rx_ring->rx_buf_len,
+                                                DMA_FROM_DEVICE);
+                       if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+                               rx_ring->rx_stats.alloc_buff_failed++;
+                               bi->dma = 0;
+                               goto no_buffers;
+                       }
+               }
+
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+               rx_desc->read.hdr_addr = 0;
+               i++;
+               if (i == rx_ring->count)
+                       i = 0;
+       }
+
+no_buffers:
+       if (rx_ring->next_to_use != i)
+               i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring:  rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+                            struct sk_buff *skb, u16 vlan_tag)
+{
+       struct i40e_q_vector *q_vector = rx_ring->q_vector;
+       struct i40e_vsi *vsi = rx_ring->vsi;
+       u64 flags = vsi->back->flags;
+
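+       /* Three delivery paths below: busy polling consumes the skb
+        * directly, netpoll bypasses GRO, and the normal path hands the
+        * skb to GRO on this vector's NAPI context.
+        */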
+#ifdef HAVE_VLAN_RX_REGISTER
+       if (vlan_tag & VLAN_VID_MASK) {
+               if (!vsi->vlgrp)
+                       dev_kfree_skb_any(skb);
+               else if (i40e_qv_busy_polling(q_vector))
+                       netif_receive_skb(skb);
+               else if (flags & I40E_FLAG_IN_NETPOLL)
+                       vlan_hwaccel_rx(skb, vsi->vlgrp, vlan_tag);
+               else
+                       vlan_gro_receive(&q_vector->napi, vsi->vlgrp,
+                                        vlan_tag, skb);
+       } else {
+               if (i40e_qv_busy_polling(q_vector))
+                       netif_receive_skb(skb);
+               else if (flags & I40E_FLAG_IN_NETPOLL)
+                       netif_rx(skb);
+               else
+                       napi_gro_receive(&q_vector->napi, skb);
+       }
+#else /* HAVE_VLAN_RX_REGISTER */
+       if (vlan_tag & VLAN_VID_MASK)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+       if (i40e_qv_busy_polling(q_vector))
+               netif_receive_skb(skb);
+       else if (flags & I40E_FLAG_IN_NETPOLL)
+               netif_rx(skb);
+       else
+               napi_gro_receive(&q_vector->napi, skb);
+#endif /* HAVE_VLAN_RX_REGISTER */
+}
+
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+/**
+ * i40e_set_transport_header - adjust skb transport header for VXLAN traffic
+ * @skb: the skb to be adjusted
+ **/
+static inline void i40e_set_transport_header(struct sk_buff *skb)
+{
+       unsigned int vlan_header;
+
+       /* Add 4 bytes for VLAN tagged packets */
+       if (skb->protocol == htons(ETH_P_8021Q) ||
+           skb->protocol == htons(ETH_P_8021AD))
+               vlan_header = VLAN_HLEN;
+       else
+               vlan_header = 0;
+
+       /* point the transport header past the outer L3 header */
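+       /* Worked example (illustrative): for an untagged IPv4 frame with
+        * ihl == 5 the transport header lands 14 + 20 = 34 bytes past the
+        * MAC header, i.e. just past the outer IP header.
+        */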
+       skb_set_transport_header(skb, (skb_mac_header(skb) - skb->data) +
+                                sizeof(struct ethhdr) +
+                                vlan_header + ip_hdr(skb)->ihl * 4);
+}
+
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+                                   struct sk_buff *skb,
+                                   u32 rx_status,
+                                   u32 rx_error,
+                                   u16 rx_ptype)
+{
+       struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+       bool ipv4 = false, ipv6 = false;
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+       bool ipv4_tunnel, ipv6_tunnel;
+       __wsum rx_udp_csum;
+       struct iphdr *iph;
+       __sum16 csum;
+
+       ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+                     (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+       ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+                     (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+#ifndef HAVE_SKBUFF_CSUM_LEVEL
+       skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
+#endif
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
+       skb->ip_summed = CHECKSUM_NONE;
+
+       /* Rx csum enabled and ip headers found? */
+#ifdef HAVE_NDO_SET_FEATURES
+       if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+               return;
+#else
+       if (!(vsi->back->flags & I40E_FLAG_RX_CSUM_ENABLED))
+               return;
+#endif
+
+       /* did the hardware decode the packet and checksum? */
+       if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+               return;
+
+       /* both known and outer_ip must be set for the below code to work */
+       if (!(decoded.known && decoded.outer_ip))
+               return;
+
+       if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+           decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+               ipv4 = true;
+       else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+                decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+               ipv6 = true;
+
+       if (ipv4 &&
+           (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+                        BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+#ifdef I40E_ADD_PROBES
+       {
+               vsi->back->rx_ip4_cso_err++;
+               goto checksum_fail;
+       }
+#else
+               goto checksum_fail;
+#endif
+
+       /* likely incorrect csum if alternate IP extension headers found */
+       if (ipv6 &&
+           rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+               /* don't increment checksum err here, non-fatal err */
+               return;
+
+#ifdef I40E_ADD_PROBES
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) {
+               if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
+                       vsi->back->rx_tcp_cso_err++;
+               else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
+                       vsi->back->rx_udp_cso_err++;
+               else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)
+                       vsi->back->rx_sctp_cso_err++;
+       }
+#endif
+       /* there was some L4 error, count error and punt packet to the stack */
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
+               goto checksum_fail;
+
+       /* handle packets that were not able to be checksummed due
+        * to arrival speed, in this case the stack can compute
+        * the csum.
+        */
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
+               return;
+
+#ifdef HAVE_VXLAN_RX_OFFLOAD
+       /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+        * it in the driver, hardware does not do it for us.
+        * Since L3L4P bit was set we assume a valid IHL value (>=5)
+        * so the total length of IPv4 header is IHL*4 bytes
+        * The UDP_0 bit *may* be set if the *inner* header is UDP
+        */
+       if (ipv4_tunnel) {
+               i40e_set_transport_header(skb);
+               if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+                   (udp_hdr(skb)->check != 0)) {
+                       rx_udp_csum = udp_csum(skb);
+                       iph = ip_hdr(skb);
+                       csum = csum_tcpudp_magic(
+                                       iph->saddr, iph->daddr,
+                                       (skb->len - skb_transport_offset(skb)),
+                                       IPPROTO_UDP, rx_udp_csum);
+
+                       if (udp_hdr(skb)->check != csum)
+                               goto checksum_fail;
+
+               } /* else it's GRE and so no outer UDP header */
+       }
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+#ifdef I40E_ADD_PROBES
+       if ((decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+           (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4))
+               vsi->back->rx_ip4_cso++;
+       if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
+               vsi->back->rx_tcp_cso++;
+       else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP)
+               vsi->back->rx_udp_cso++;
+       else if (decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_SCTP)
+               vsi->back->rx_sctp_cso++;
+#endif
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+#ifdef HAVE_SKBUFF_CSUM_LEVEL
+       skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+#endif
+       return;
+
+checksum_fail:
+       vsi->back->hw_csum_rx_error++;
+}
+
+#ifdef NETIF_F_RXHASH
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+                              union i40e_rx_desc *rx_desc)
+{
+       const __le64 rss_mask =
+               cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+                           I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+       if ((ring->netdev->features & NETIF_F_RXHASH) &&
+           (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+               return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+       else
+               return 0;
+}
+
+/**
+ * i40e_ptype_to_hash - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline int i40e_ptype_to_hash(u8 ptype)
+{
+       struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+       if (!decoded.known)
+               return PKT_HASH_TYPE_NONE;
+
+       if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+           decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+               return PKT_HASH_TYPE_L4;
+       else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+                decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+               return PKT_HASH_TYPE_L3;
+       else
+               return PKT_HASH_TYPE_L2;
+}
+
+#endif /* NETIF_F_RXHASH */
+/**
+ * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
+ * @rx_ring:  rx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns number of packets cleaned
+ **/
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
+{
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+       u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+       const int current_node = numa_node_id();
+       struct i40e_vsi *vsi = rx_ring->vsi;
+       u16 i = rx_ring->next_to_clean;
+       union i40e_rx_desc *rx_desc;
+       u32 rx_error, rx_status;
+       u8 rx_ptype;
+       u64 qword;
+
+       do {
+               struct i40e_rx_buffer *rx_bi;
+               struct sk_buff *skb;
+               u16 vlan_tag;
+               /* return some buffers to hardware, one at a time is too slow */
+               if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+                       i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+                       cleaned_count = 0;
+               }
+
+               i = rx_ring->next_to_clean;
+               rx_desc = I40E_RX_DESC(rx_ring, i);
+               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+               rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                       I40E_RXD_QW1_STATUS_SHIFT;
+
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
+                       break;
+
+               /* This memory barrier is needed to keep us from reading
+                * any other fields out of the rx_desc until we know the
+                * DD bit is set.
+                */
+               rmb();
+               if (i40e_rx_is_programming_status(qword)) {
+                       i40e_clean_programming_status(rx_ring, rx_desc);
+                       I40E_RX_INCREMENT(rx_ring, i);
+                       continue;
+               }
+               rx_bi = &rx_ring->rx_bi[i];
+               skb = rx_bi->skb;
+               if (likely(!skb)) {
+                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                                       rx_ring->rx_hdr_len);
+                       if (!skb) {
+                               rx_ring->rx_stats.alloc_buff_failed++;
+                               break;
+                       }
+
+                       /* initialize queue mapping */
+                       skb_record_rx_queue(skb, rx_ring->queue_index);
+                       /* we are reusing so sync this buffer for CPU use */
+                       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                                     rx_bi->dma,
+                                                     0,
+                                                     rx_ring->rx_hdr_len,
+                                                     DMA_FROM_DEVICE);
+               }
+               rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+                               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+               rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+                               I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+               rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+                        I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+               rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+                          I40E_RXD_QW1_ERROR_SHIFT;
+               rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+                          I40E_RXD_QW1_PTYPE_SHIFT;
+               prefetch(rx_bi->page);
+               rx_bi->skb = NULL;
+               cleaned_count++;
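+               /* sph: the hardware split the packet header into hdr_buf;
+                * hbo: the header overflowed the header buffer, in which
+                * case a fixed I40E_RX_HDR_SIZE chunk is copied instead.
+                */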
+               if (rx_hbo || rx_sph) {
+                       int len;
+
+                       if (rx_hbo)
+                               len = I40E_RX_HDR_SIZE;
+                       else
+                               len = rx_header_len;
+                       memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
+               } else if (skb->len == 0) {
+                       int len;
+
+                       len = (rx_packet_len > skb_headlen(skb) ?
+                               skb_headlen(skb) : rx_packet_len);
+                       memcpy(__skb_put(skb, len),
+                              rx_bi->page + rx_bi->page_offset,
+                              len);
+                       rx_bi->page_offset += len;
+                       rx_packet_len -= len;
+               }
+
+               /* Get the rest of the data if this was a header split */
+               if (rx_packet_len) {
+                       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+                                          rx_bi->page,
+                                          rx_bi->page_offset,
+                                          rx_packet_len);
+
+                       skb->len += rx_packet_len;
+                       skb->data_len += rx_packet_len;
+                       skb->truesize += rx_packet_len;
+
+                       if ((page_count(rx_bi->page) == 1) &&
+                           (page_to_nid(rx_bi->page) == current_node))
+                               get_page(rx_bi->page);
+                       else
+                               rx_bi->page = NULL;
+
+                       dma_unmap_page(rx_ring->dev,
+                                      rx_bi->page_dma,
+                                      PAGE_SIZE / 2,
+                                      DMA_FROM_DEVICE);
+                       rx_bi->page_dma = 0;
+               }
+               I40E_RX_INCREMENT(rx_ring, i);
+
+               if (unlikely(
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                       struct i40e_rx_buffer *next_buffer;
+
+                       next_buffer = &rx_ring->rx_bi[i];
+                       next_buffer->skb = skb;
+                       rx_ring->rx_stats.non_eop_descs++;
+                       continue;
+               }
+
+               /* ERR_MASK will only have valid bits if EOP set */
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+
+#ifdef NETIF_F_RXHASH
+               skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+                            i40e_ptype_to_hash(rx_ptype));
+#endif
+#ifdef HAVE_PTP_1588_CLOCK
+               if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+                       i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+                                          I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+                                          I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
+                       rx_ring->last_rx_timestamp = jiffies;
+               }
+
+#endif /* HAVE_PTP_1588_CLOCK */
+               /* probably a little skewed due to removing CRC */
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
+
+               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+               i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+                        ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+                        : 0;
+#ifdef I40E_FCOE
+               if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+#endif
+               skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
+               i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+               rx_desc->wb.qword1.status_error_len = 0;
+
+       } while (likely(total_rx_packets < budget));
+
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
+       rx_ring->q_vector->rx.total_packets += total_rx_packets;
+       rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+       return total_rx_packets;
+}
+
+/**
+ * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
+ * @rx_ring:  rx ring to clean
+ * @budget:   how many cleans we're allowed
+ *
+ * Returns number of packets cleaned
+ **/
+static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+{
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+       struct i40e_vsi *vsi = rx_ring->vsi;
+       u16 i;
+       union i40e_rx_desc *rx_desc;
+       u32 rx_error, rx_status;
+       u16 rx_packet_len;
+       u8 rx_ptype;
+       u64 qword;
+
+       do {
+               struct i40e_rx_buffer *rx_bi;
+               struct sk_buff *skb;
+               u16 vlan_tag;
+               /* return some buffers to hardware, one at a time is too slow */
+               if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+                       i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
+                       cleaned_count = 0;
+               }
+
+               i = rx_ring->next_to_clean;
+               rx_desc = I40E_RX_DESC(rx_ring, i);
+               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+               rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                       I40E_RXD_QW1_STATUS_SHIFT;
+
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
+                       break;
+
+               /* This memory barrier is needed to keep us from reading
+                * any other fields out of the rx_desc until we know the
+                * DD bit is set.
+                */
+               rmb();
+
+               if (i40e_rx_is_programming_status(qword)) {
+                       i40e_clean_programming_status(rx_ring, rx_desc);
+                       I40E_RX_INCREMENT(rx_ring, i);
+                       continue;
+               }
+               rx_bi = &rx_ring->rx_bi[i];
+               skb = rx_bi->skb;
+               prefetch(skb->data);
+
+               rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+                               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+
+               rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+                          I40E_RXD_QW1_ERROR_SHIFT;
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+                          I40E_RXD_QW1_PTYPE_SHIFT;
+               rx_bi->skb = NULL;
+               cleaned_count++;
+
+               /* Get the header and possibly the whole packet.
+                * If this is an skb from a previous receive, dma will be 0.
+                */
+               skb_put(skb, rx_packet_len);
+               dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
+                                DMA_FROM_DEVICE);
+               rx_bi->dma = 0;
+
+               I40E_RX_INCREMENT(rx_ring, i);
+
+               if (unlikely(
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                       rx_ring->rx_stats.non_eop_descs++;
+                       continue;
+               }
+
+               /* ERR_MASK will only have valid bits if EOP set */
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+
+#ifdef NETIF_F_RXHASH
+               skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+                            i40e_ptype_to_hash(rx_ptype));
+#endif
+#ifdef HAVE_PTP_1588_CLOCK
+               if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+                       i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+                                          I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+                                          I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
+                       rx_ring->last_rx_timestamp = jiffies;
+               }
+
+#endif /* HAVE_PTP_1588_CLOCK */
+               /* probably a little skewed due to removing CRC */
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
+
+               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+               i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+                        ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+                        : 0;
+#ifdef I40E_FCOE
+               if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+#endif
+               skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
+               i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+               rx_desc->wb.qword1.status_error_len = 0;
+       } while (likely(total_rx_packets < budget));
+
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
+       rx_ring->q_vector->rx.total_packets += total_rx_packets;
+       rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+       return total_rx_packets;
+}
+
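+/**
+ * i40e_buildreg_itr - build a value for programming a DYN_CTLN register
+ * @type: which ITR index the interval applies to (Rx, Tx, or none)
+ * @itr: interval value in the units the hardware expects
+ *
+ * The resulting value enables the interrupt, clears the PBA, and selects
+ * the ITR index and interval to program.
+ **/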
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+       u32 val;
+
+       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+             (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+             (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+
+       return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_PFINT_DYN_CTLN
+
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+                                         struct i40e_q_vector *q_vector)
+{
+       struct i40e_hw *hw = &vsi->back->hw;
+       bool rx = false, tx = false;
+       u32 rxval, txval;
+       int vector;
+
+       vector = (q_vector->v_idx + vsi->base_vector);
+
+       /* avoid dynamic calculation if in countdown mode OR if
+        * all dynamic ITR is disabled
+        */
+       rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+       if (q_vector->itr_countdown > 0 ||
+           (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+            !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+               goto enable_int;
+       }
+
+       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+               rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+               rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
+       }
+
+       if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+               tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+               txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
+       }
+
+       if (rx || tx) {
+               /* get the higher of the two ITR adjustments and
+                * use the same value for both ITR registers
+                * when in adaptive mode (rx and/or tx)
+                */
+               u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+
+               q_vector->tx.itr = q_vector->rx.itr = itr;
+               txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+               tx = true;
+               rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+               rx = true;
+       }
+
+       /* only need to enable the interrupt once, but need
+        * to possibly update both ITR values
+        */
+       if (rx) {
+               /* set the INTENA_MSK_MASK so that this first write
+                * won't actually enable the interrupt, instead just
+                * updating the ITR (bit 31 on both PF and VF)
+                */
+               rxval |= BIT(31);
+               /* don't check _DOWN because interrupt isn't being enabled */
+               wr32(hw, INTREG(vector - 1), rxval);
+       }
+
+enable_int:
+       if (!test_bit(__I40E_DOWN, &vsi->state))
+               wr32(hw, INTREG(vector - 1), txval);
+
+       if (q_vector->itr_countdown)
+               q_vector->itr_countdown--;
+       else
+               q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
+}
+
+/**
+ * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40e_napi_poll(struct napi_struct *napi, int budget)
+{
+       struct i40e_q_vector *q_vector =
+                              container_of(napi, struct i40e_q_vector, napi);
+       struct i40e_vsi *vsi = q_vector->vsi;
+       u64 flags = vsi->back->flags;
+       bool clean_complete = true;
+       bool arm_wb = false;
+       struct i40e_ring *ring;
+       int budget_per_ring;
+       int cleaned;
+
+       if (test_bit(__I40E_DOWN, &vsi->state)) {
+               napi_complete(napi);
+               return 0;
+       }
+
+       /* Since the actual Tx work is minimal, we can give the Tx a larger
+        * budget and be more aggressive about cleaning up the Tx descriptors.
+        */
+       i40e_for_each_ring(ring, q_vector->tx) {
+               clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+               arm_wb |= ring->arm_wb;
+               ring->arm_wb = false;
+       }
+
+       /* if busy polling owns the vector, or the netpoll flag is set,
+        * skip the Rx clean
+        */
+       if (flags & I40E_FLAG_IN_NETPOLL)
+               return budget;
+       else if (!i40e_qv_lock_napi(q_vector))
+               return budget;
+
+       /* We attempt to distribute budget to each Rx queue fairly, but don't
+        * allow the budget to go below 1 because that would exit polling early.
+        */
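+       /* e.g. a NAPI budget of 64 across four ring pairs on this vector
+        * gives each Rx ring a budget of 16 descriptors per poll.
+        */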
+       budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+
+       i40e_for_each_ring(ring, q_vector->rx) {
+
+               if (ring_is_ps_enabled(ring))
+                       cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
+               else
+                       cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+               /* if we didn't clean as many as budgeted, we must be done */
+               clean_complete &= (budget_per_ring != cleaned);
+       }
+
+       i40e_qv_unlock_napi(q_vector);
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+       /* if netdev is disabled we need to stop polling */
+       if (!netif_running(vsi->netdev))
+               clean_complete = true;
+
+#endif
+       /* If work not completed, return budget and polling will return */
+       if (!clean_complete) {
+               if (arm_wb)
+                       i40e_force_wb(vsi, q_vector);
+               return budget;
+       }
+
+       /* Work is done so exit the polling mode and re-enable the interrupt */
+       napi_complete(napi);
+       if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+               i40e_update_enable_itr(vsi, q_vector);
+       } else { /* Legacy mode */
+               struct i40e_hw *hw = &vsi->back->hw;
+               /* We re-enable the queue 0 interrupt cause but don't
+                * worry about dynamic_enable because we left it on
+                * for the other possible interrupts during NAPI
+                */
+               u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+
+               qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+               wr32(hw, I40E_QINT_RQCTL(0), qval);
+               qval = rd32(hw, I40E_QINT_TQCTL(0));
+               qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+               wr32(hw, I40E_QINT_TQCTL(0), qval);
+               i40e_irq_dynamic_enable_icr0(vsi->back);
+       }
+       return 0;
+}
+
+/**
+ * i40e_atr - Add a Flow Director ATR filter
+ * @tx_ring:  ring to add programming descriptor to
+ * @skb:      send buffer
+ * @tx_flags: send tx flags
+ * @protocol: wire protocol
+ **/
+static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                    u32 tx_flags, __be16 protocol)
+{
+       struct i40e_filter_program_desc *fdir_desc;
+       struct i40e_pf *pf = tx_ring->vsi->back;
+       union {
+               unsigned char *network;
+               struct iphdr *ipv4;
+               struct ipv6hdr *ipv6;
+       } hdr;
+       struct tcphdr *th;
+       unsigned int hlen;
+       u32 flex_ptype, dtype_cmd;
+       u16 i;
+
+       /* make sure ATR is enabled */
+       if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
+               return;
+
+       if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+               return;
+
+       /* if sampling is disabled do nothing */
+       if (!tx_ring->atr_sample_rate)
+               return;
+
+       if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+               return;
+
+#ifdef HAVE_SKB_INNER_NETWORK_HEADER
+       if ((tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+               hdr.network = skb_inner_network_header(skb);
+               hlen = skb_inner_network_header_len(skb);
+       } else {
+#endif
+               /* snag network header to get L4 type and address */
+               hdr.network = skb_network_header(skb);
+
+               /* Currently only IPv4/IPv6 with TCP is supported */
+               /* access ihl as u8 to avoid unaligned access on ia64 */
+               if (tx_flags & I40E_TX_FLAGS_IPV4)
+                       hlen = (hdr.network[0] & 0x0F) << 2;
+               else if (protocol == htons(ETH_P_IPV6))
+                       hlen = sizeof(struct ipv6hdr);
+               else
+                       return;
+#ifdef HAVE_SKB_INNER_NETWORK_HEADER
+       }
+#endif
+
+       /* Currently only IPv4/IPv6 with TCP is supported */
+       /* Note: tx_flags gets modified to reflect inner protocols in
+        * tx_enable_csum function if encap is enabled.
+        */
+       if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+           (hdr.ipv4->protocol != IPPROTO_TCP))
+               return;
+       else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+                  (hdr.ipv6->nexthdr != IPPROTO_TCP))
+               return;
+
+       th = (struct tcphdr *)(hdr.network + hlen);
+
+       /* Due to lack of space, no more new filters can be programmed */
+       if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+               return;
+
+       tx_ring->atr_count++;
+
+       /* sample on all syn/fin/rst packets or once every atr sample rate */
+       if (!th->fin &&
+           !th->syn &&
+           !th->rst &&
+           (tx_ring->atr_count < tx_ring->atr_sample_rate))
+               return;
+
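+       /* With the driver's default sample rate of 20 (assumed here),
+        * roughly every 20th TCP packet plus every SYN/FIN/RST ends up
+        * programming a filter descriptor.
+        */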
+       tx_ring->atr_count = 0;
+
+       /* grab the next descriptor */
+       i = tx_ring->next_to_use;
+       fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+       flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+                     I40E_TXD_FLTR_QW0_QINDEX_MASK;
+       flex_ptype |= (protocol == htons(ETH_P_IP)) ?
+                     (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+                      I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
+                     (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+                      I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+       flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
+
+       dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+       dtype_cmd |= (th->fin || th->rst) ?
+                    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+                     I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
+                    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+                     I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+       dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+                    I40E_TXD_FLTR_QW1_DEST_SHIFT;
+
+       dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
+                    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+
+       dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+       if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+               dtype_cmd |=
+                       ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+       else
+               dtype_cmd |=
+                       ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+
+       fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+       fdir_desc->rsvd = cpu_to_le32(0);
+       fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+       fdir_desc->fd_id = cpu_to_le32(0);
+}
+
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags:   the tx flags to be set
+ *
+ * Checks the skb and sets up the generic transmit flags related to VLAN
+ * tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns a negative error code if the frame should be dropped, otherwise
+ * returns 0 to indicate the flags have been set properly.
+ **/
+#ifdef I40E_FCOE
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                     struct i40e_ring *tx_ring, u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                            struct i40e_ring *tx_ring,
+                                            u32 *flags)
+#endif
+{
+       __be16 protocol = skb->protocol;
+       u32  tx_flags = 0;
+
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+       if (protocol == htons(ETH_P_8021Q) &&
+           !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+#else
+       if (protocol == htons(ETH_P_8021Q) &&
+           !(tx_ring->netdev->features & NETIF_F_HW_VLAN_TX)) {
+#endif
+               /* When HW VLAN acceleration is turned off by the user the
+                * stack sets the protocol to 8021q so that the driver
+                * can take any steps required to support the SW only
+                * VLAN handling.  In our case the driver doesn't need
+                * to take any further steps so just set the protocol
+                * to the encapsulated ethertype.
+                */
+               skb->protocol = vlan_get_protocol(skb);
+               goto out;
+       }
+
+       /* if we have a HW VLAN tag being added, default to the HW one */
+       if (skb_vlan_tag_present(skb)) {
+               tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+               tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+       /* else if it is a SW VLAN, check the next protocol and store the tag */
+       } else if (protocol == htons(ETH_P_8021Q)) {
+               struct vlan_hdr *vhdr, _vhdr;
+
+               vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+               if (!vhdr)
+                       return -EINVAL;
+
+               protocol = vhdr->h_vlan_encapsulated_proto;
+               tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+               tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+       }
+
+       if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+               goto out;
+
+       /* Insert 802.1p priority into VLAN header */
+       if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
+           (skb->priority != TC_PRIO_CONTROL)) {
+               tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
+               tx_flags |= (skb->priority & 0x7) <<
+                               I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
+               if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
+                       struct vlan_ethhdr *vhdr;
+
+                       if (skb_header_cloned(skb) &&
+                           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+                               return -ENOMEM;
+                       vhdr = (struct vlan_ethhdr *)skb->data;
+                       vhdr->h_vlan_TCI = htons(tx_flags >>
+                                                I40E_TX_FLAGS_VLAN_SHIFT);
+               } else {
+                       tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+               }
+       }
+
+out:
+       *flags = tx_flags;
+       return 0;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring:  ptr to the ring to send
+ * @skb:      ptr to the skb we're sending
+ * @hdr_len:  ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO is needed, 1 if TSO is set up, or a negative error code
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                   u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                   u32 *cd_tunneling)
+{
+       u32 cd_cmd, cd_tso_len, cd_mss;
+       struct tcphdr *tcph;
+       struct iphdr *iph;
+       u32 l4len;
+       int err;
+#ifdef NETIF_F_TSO6
+       struct ipv6hdr *ipv6h;
+#endif
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
+       bool enc = skb->encapsulation;
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+
+       if (!skb_is_gso(skb))
+               return 0;
+
+       if (skb_header_cloned(skb)) {
+               err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+               if (err)
+                       return err;
+       }
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
+       iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb);
+#else
+       iph = ip_hdr(skb);
+#endif
+
+       if (iph->version == 4) {
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
+               iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb);
+               tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+#else
+               iph = ip_hdr(skb);
+               tcph = tcp_hdr(skb);
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+               iph->tot_len = 0;
+               iph->check = 0;
+               tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                0, IPPROTO_TCP, 0);
+#ifdef NETIF_F_TSO6
+       } else if (skb_is_gso_v6(skb)) {
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
+               ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+               tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+#else
+               ipv6h = ipv6_hdr(skb);
+               tcph = tcp_hdr(skb);
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+               ipv6h->payload_len = 0;
+               tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+                                              0, IPPROTO_TCP, 0);
+#endif /* NETIF_F_TSO6 */
+       }
+
+#ifdef HAVE_ENCAP_TSO_OFFLOAD
+       l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+       *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data)
+                      : skb_transport_offset(skb);
+       *hdr_len += l4len;
+#else
+       l4len = tcp_hdrlen(skb);
+       *hdr_len = skb_transport_offset(skb) + l4len;
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+
+       /* find the field values */
+       cd_cmd = I40E_TX_CTX_DESC_TSO;
+       cd_tso_len = skb->len - *hdr_len;
+       cd_mss = skb_shinfo(skb)->gso_size;
+       *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+                               ((u64)cd_tso_len <<
+                                I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+                               ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+       return 1;
+}
+
+#ifdef HAVE_PTP_1588_CLOCK
+/**
+ * i40e_tsyn - set up the tsyn context descriptor
+ * @tx_ring:  ptr to the ring to send
+ * @skb:      ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor
+ *
+ * Returns 0 if no Tx timestamp can be taken and 1 if the timestamp will be
+ * taken
+ **/
+static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                    u32 tx_flags, u64 *cd_type_cmd_tso_mss)
+{
+       struct i40e_pf *pf;
+
+#ifdef SKB_SHARED_TX_IS_UNION
+       if (likely(!(skb_tx(skb)->hardware)))
+#else
+       if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+#endif
+               return 0;
+
+       /* Tx timestamps cannot be sampled when doing TSO */
+       if (tx_flags & I40E_TX_FLAGS_TSO)
+               return 0;
+
+       /* only timestamp the outbound packet if the user has requested it and
+        * we are not already transmitting a packet to be timestamped
+        */
+       pf = i40e_netdev_to_pf(tx_ring->netdev);
+       if (!(pf->flags & I40E_FLAG_PTP))
+               return 0;
+
+       if (pf->ptp_tx && !pf->ptp_tx_skb) {
+#ifdef SKB_SHARED_TX_IS_UNION
+               skb_tx(skb)->in_progress = 1;
+#else
+               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+#endif
+               pf->ptp_tx_skb = skb_get(skb);
+       } else {
+               return 0;
+       }
+
+       *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
+                               I40E_TXD_CTX_QW1_CMD_SHIFT;
+
+       return 1;
+}
+
+#endif /* HAVE_PTP_1588_CLOCK */
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: pointer to Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: ptr to the ring the buffer will be sent on
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+                               u32 *td_cmd, u32 *td_offset,
+                               struct i40e_ring *tx_ring,
+                               u32 *cd_tunneling)
+{
+       struct ipv6hdr *this_ipv6_hdr;
+       unsigned int this_tcp_hdrlen;
+       struct iphdr *this_ip_hdr;
+       u32 network_hdr_len;
+       u8 l4_hdr = 0;
+#ifdef HAVE_ENCAP_CSUM_OFFLOAD
+       u32 l4_tunnel = 0;
+
+       if (skb->encapsulation) {
+               switch (ip_hdr(skb)->protocol) {
+               case IPPROTO_UDP:
+                       l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                       *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+                       break;
+               default:
+                       return;
+               }
+               network_hdr_len = skb_inner_network_header_len(skb);
+               this_ip_hdr = inner_ip_hdr(skb);
+               this_ipv6_hdr = inner_ipv6_hdr(skb);
+               this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+               if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                       if (*tx_flags & I40E_TX_FLAGS_TSO) {
+                               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+                               ip_hdr(skb)->check = 0;
+#ifdef I40E_ADD_PROBES
+                               tx_ring->vsi->back->tx_ip4_cso++;
+#endif
+                       } else {
+                               *cd_tunneling |=
+                                        I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+                       }
+               } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       if (*tx_flags & I40E_TX_FLAGS_TSO)
+                               ip_hdr(skb)->check = 0;
+               }
+
+               /* Now set the ctx descriptor fields */
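+               /* outer IP header length is stored in dwords and the
+                * tunnel/NAT header length in words, matching the MACLEN/
+                * IPLEN units noted for the data descriptor below
+                */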
+               *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+                                  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
+                                  l4_tunnel                             |
+                                  ((skb_inner_network_offset(skb) -
+                                       skb_transport_offset(skb)) >> 1) <<
+                                  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+               if (this_ip_hdr->version == 6) {
+                       *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       *tx_flags |= I40E_TX_FLAGS_IPV6;
+               }
+
+       } else {
+               network_hdr_len = skb_network_header_len(skb);
+               this_ip_hdr = ip_hdr(skb);
+               this_ipv6_hdr = ipv6_hdr(skb);
+               this_tcp_hdrlen = tcp_hdrlen(skb);
+       }
+#else
+       network_hdr_len = skb_network_header_len(skb);
+       this_ip_hdr = ip_hdr(skb);
+       this_ipv6_hdr = ipv6_hdr(skb);
+       this_tcp_hdrlen = tcp_hdrlen(skb);
+#endif /* HAVE_ENCAP_CSUM_OFFLOAD */
+
+       /* Enable IP checksum offloads */
+       if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+               l4_hdr = this_ip_hdr->protocol;
+               /* the stack computes the IP header already; the only time we
+                * need the hardware to recompute it is in the case of TSO.
+                */
+               if (*tx_flags & I40E_TX_FLAGS_TSO) {
+                       *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+                       this_ip_hdr->check = 0;
+#ifdef I40E_ADD_PROBES
+                       tx_ring->vsi->back->tx_ip4_cso++;
+#endif
+               } else {
+                       *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+               }
+               /* Now set the td_offset for IP header length */
+               *td_offset = (network_hdr_len >> 2) <<
+                             I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+#ifdef NETIF_F_IPV6_CSUM
+       } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+               l4_hdr = this_ipv6_hdr->nexthdr;
+               *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+               /* Now set the td_offset for IP header length */
+               *td_offset = (network_hdr_len >> 2) <<
+                             I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+#endif
+       }
+       /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+       *td_offset |= (skb_network_offset(skb) >> 1) <<
+                      I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+       /* Enable L4 checksum offloads */
+       switch (l4_hdr) {
+       case IPPROTO_TCP:
+               /* enable checksum offloads */
+               *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+               *td_offset |= (this_tcp_hdrlen >> 2) <<
+                      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+#ifdef I40E_ADD_PROBES
+               tx_ring->vsi->back->tx_tcp_cso++;
+#endif
+               break;
+       case IPPROTO_SCTP:
+               /* enable SCTP checksum offload */
+#ifdef HAVE_SCTP
+               *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+               *td_offset |= (sizeof(struct sctphdr) >> 2) <<
+                              I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+#ifdef I40E_ADD_PROBES
+                       tx_ring->vsi->back->tx_sctp_cso++;
+#endif
+#endif /* HAVE_SCTP */
+               break;
+       case IPPROTO_UDP:
+               /* enable UDP checksum offload */
+               *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+               *td_offset |= (sizeof(struct udphdr) >> 2) <<
+                              I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+#ifdef I40E_ADD_PROBES
+                       tx_ring->vsi->back->tx_udp_cso++;
+#endif
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring:  ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+                              const u64 cd_type_cmd_tso_mss,
+                              const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+       struct i40e_tx_context_desc *context_desc;
+       int i = tx_ring->next_to_use;
+
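+       /* a bare DTYPE_CONTEXT descriptor with no TSO/TSYN command bits,
+        * tunneling params or l2tag2 carries no information, so skip it
+        */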
+       if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+           !cd_tunneling && !cd_l2tag2)
+               return;
+
+       /* grab the next descriptor */
+       context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+       /* cpu_to_le32 and assign to struct fields */
+       context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+       context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+       context_desc->rsvd = cpu_to_le16(0);
+       context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
+{
+       struct skb_frag_struct *frag;
+       bool linearize = false;
+       unsigned int size = 0;
+       u16 num_frags;
+       u16 gso_segs;
+
+       num_frags = skb_shinfo(skb)->nr_frags;
+       gso_segs = skb_shinfo(skb)->gso_segs;
+
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+               u16 j = 0;
+
+               if (num_frags < (I40E_MAX_BUFFER_TXD))
+                       goto linearize_chk_done;
+               /* try the simple math first: are there too many frags per segment? */
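+               /* e.g. 32 frags across 4 segments gives DIV_ROUND_UP(36, 4)
+                * == 9 > I40E_MAX_BUFFER_TXD, so the skb is linearized
+                * (illustrative numbers)
+                */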
+               if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+                                                       I40E_MAX_BUFFER_TXD) {
+                       linearize = true;
+                       goto linearize_chk_done;
+               }
+               frag = &skb_shinfo(skb)->frags[0];
+               /* we might still have more fragments per segment */
+               do {
+                       size += skb_frag_size(frag);
+                       frag++; j++;
+                       if ((size >= skb_shinfo(skb)->gso_size) &&
+                           (j < I40E_MAX_BUFFER_TXD)) {
+                               size = (size % skb_shinfo(skb)->gso_size);
+                               j = (size) ? 1 : 0;
+                       }
+                       if (j == I40E_MAX_BUFFER_TXD) {
+                               linearize = true;
+                               break;
+                       }
+                       num_frags--;
+               } while (num_frags);
+       } else {
+               if (num_frags >= I40E_MAX_BUFFER_TXD)
+                       linearize = true;
+       }
+
+linearize_chk_done:
+       return linearize;
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       /* Memory barrier before checking head and tail */
+       smp_mb();
+
+       /* Check again in a case another CPU has just made room available. */
+       if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+               return -EBUSY;
+
+       /* A reprieve! - use start_queue because it doesn't call schedule */
+       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       ++tx_ring->tx_stats.restart_queue;
+       return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+#ifdef I40E_FCOE
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#else
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#endif
+{
+       if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+               return 0;
+       return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring:  ring to send buffer on
+ * @skb:      send buffer
+ * @first:    first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len:  size of the packet header
+ * @td_cmd:   the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+#ifdef I40E_FCOE
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                       struct i40e_tx_buffer *first, u32 tx_flags,
+                       const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                              struct i40e_tx_buffer *first, u32 tx_flags,
+                              const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#endif
+{
+       struct skb_frag_struct *frag;
+       struct i40e_tx_buffer *tx_bi;
+       struct i40e_tx_desc *tx_desc;
+       u16 i = tx_ring->next_to_use;
+       unsigned int data_len;
+       unsigned int size;
+       u32 td_tag = 0;
+       dma_addr_t dma;
+       u16 gso_segs;
+
+       if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+               td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+               td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+                        I40E_TX_FLAGS_VLAN_SHIFT;
+       }
+
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+               gso_segs = skb_shinfo(skb)->gso_segs;
+       else
+               gso_segs = 1;
+
+#ifdef I40E_ADD_PROBES
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+               tx_ring->vsi->back->tcp_segs += gso_segs;
+
+#endif
+       data_len = skb->data_len;
+       size = skb_headlen(skb);
+
+       /* wire bytes: payload counted once, plus one header per GSO segment */
+       first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
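+       /* e.g. skb->len 9014, hdr_len 66, gso_segs 6 yields
+        * 9014 - 66 + 6 * 66 == 9344 wire bytes (illustrative figures)
+        */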
+       first->gso_segs = gso_segs;
+       first->skb = skb;
+       first->tx_flags = tx_flags;
+
+       dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+       tx_desc = I40E_TX_DESC(tx_ring, i);
+       tx_bi = first;
+
+       for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+               if (dma_mapping_error(tx_ring->dev, dma))
+                       goto dma_error;
+
+               /* record length, and DMA address */
+               dma_unmap_len_set(tx_bi, len, size);
+               dma_unmap_addr_set(tx_bi, dma, dma);
+
+               tx_desc->buffer_addr = cpu_to_le64(dma);
+
+               while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
+                       tx_desc->cmd_type_offset_bsz =
+                               build_ctob(td_cmd, td_offset,
+                                          I40E_MAX_DATA_PER_TXD, td_tag);
+
+                       tx_desc++;
+                       i++;
+                       if (i == tx_ring->count) {
+                               tx_desc = I40E_TX_DESC(tx_ring, 0);
+                               i = 0;
+                       }
+                       tx_bi = &tx_ring->tx_bi[i];
+                       memset(tx_bi, 0, sizeof(struct i40e_tx_buffer));
+
+                       dma += I40E_MAX_DATA_PER_TXD;
+                       size -= I40E_MAX_DATA_PER_TXD;
+
+                       tx_desc->buffer_addr = cpu_to_le64(dma);
+               }
+
+               if (likely(!data_len))
+                       break;
+
+               tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+                                                         size, td_tag);
+
+               tx_desc++;
+               i++;
+               if (i == tx_ring->count) {
+                       tx_desc = I40E_TX_DESC(tx_ring, 0);
+                       i = 0;
+               }
+
+               size = skb_frag_size(frag);
+               data_len -= size;
+
+               dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+                                      DMA_TO_DEVICE);
+
+               tx_bi = &tx_ring->tx_bi[i];
+               memset(tx_bi, 0, sizeof(struct i40e_tx_buffer));
+       }
+
+       /* Place RS bit on last descriptor of any packet that spans across the
+        * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
+        */
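+       /* with xmit_more set and stride budget remaining, post EOP only so
+        * descriptor write-back is deferred; the I40E_TXD_CMD path instead
+        * requests immediate write-back (the RS bit described above)
+        */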
+#ifdef HAVE_SKB_XMIT_MORE
+       if (skb->xmit_more  &&
+           ((tx_ring->packet_stride & WB_STRIDE) != WB_STRIDE) &&
+           (first <= &tx_ring->tx_bi[i]) &&
+           (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+               tx_ring->packet_stride++;
+               tx_desc->cmd_type_offset_bsz =
+                       build_ctob(td_cmd, td_offset, size, td_tag) |
+                       cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
+                                        I40E_TXD_QW1_CMD_SHIFT);
+       } else {
+               tx_ring->packet_stride = 0;
+               tx_desc->cmd_type_offset_bsz =
+                       build_ctob(td_cmd, td_offset, size, td_tag) |
+                       cpu_to_le64((u64)I40E_TXD_CMD <<
+                                        I40E_TXD_QW1_CMD_SHIFT);
+       }
+#else
+       tx_desc->cmd_type_offset_bsz =
+               build_ctob(td_cmd, td_offset, size, td_tag) |
+               cpu_to_le64((u64)I40E_TXD_CMD <<
+                                I40E_TXD_QW1_CMD_SHIFT);
+
+#endif
+
+       netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+                                                tx_ring->queue_index),
+                            first->bytecount);
+
+       /* Force memory writes to complete before letting h/w
+        * know there are new descriptors to fetch.  (Only
+        * applicable for weak-ordered memory model archs,
+        * such as IA-64).
+        */
+       wmb();
+
+       /* set next_to_watch value indicating a packet is present */
+       first->next_to_watch = tx_desc;
+
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       tx_ring->next_to_use = i;
+
+       i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+       /* notify HW of packet */
+#ifdef HAVE_SKB_XMIT_MORE
+       if (!skb->xmit_more ||
+           netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                                                  tx_ring->queue_index)))
+               writel(i, tx_ring->tail);
+       else
+               prefetchw(tx_desc + 1);
+#else
+       writel(i, tx_ring->tail);
+#endif /* HAVE_SKB_XMIT_MORE */
+
+       return;
+
+dma_error:
+       dev_info(tx_ring->dev, "TX DMA map failed\n");
+
+       /* clear dma mappings for failed tx_bi map */
+       for (;;) {
+               tx_bi = &tx_ring->tx_bi[i];
+               i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
+               if (tx_bi == first)
+                       break;
+               if (i == 0)
+                       i = tx_ring->count;
+               i--;
+       }
+
+       tx_ring->next_to_use = i;
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb. Returns 0 to
+ * indicate there are not enough descriptors available in this ring, since we
+ * need at least one descriptor.
+ **/
+#ifdef I40E_FCOE
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+                                     struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+                                            struct i40e_ring *tx_ring)
+#endif
+{
+       unsigned int f;
+       int count = 0;
+
+       /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+        *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+        *       + 4 desc gap to avoid the cache line where head is,
+        *       + 1 desc for context descriptor,
+        * otherwise try next time
+        */
+       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+       count += TXD_USE_COUNT(skb_headlen(skb));
+       if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+               tx_ring->tx_stats.tx_busy++;
+               return 0;
+       }
+       return count;
+}
+
+#if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE)
+/**
+ * i40e_lan_select_queue - Select the right Tx queue for the skb for LAN VSI
+ * @netdev: network interface device structure
+ * @skb:    send buffer
+ *
+ * Returns the index of the selected Tx queue
+ **/
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb)
+{
+       return skb_tx_hash(netdev, skb);
+}
+
+#endif /* !HAVE_NET_DEVICE_OPS && HAVE_NETDEV_SELECT_QUEUE */
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb:     send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+                                       struct i40e_ring *tx_ring)
+{
+       u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+       u32 cd_tunneling = 0, cd_l2tag2 = 0;
+       struct i40e_tx_buffer *first;
+       u32 td_offset = 0;
+       u32 tx_flags = 0;
+       __be16 protocol;
+       u32 td_cmd = 0;
+       u8 hdr_len = 0;
+#ifdef HAVE_PTP_1588_CLOCK
+       int tsyn;
+#endif /* HAVE_PTP_1588_CLOCK */
+       int tso;
+
+       if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+               return NETDEV_TX_BUSY;
+
+       /* prepare the xmit flags */
+       if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+               goto out_drop;
+
+       /* obtain protocol of skb */
+       protocol = skb->protocol;
+
+       /* record the location of the first descriptor for this packet */
+       first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+       /* setup IPv4/IPv6 offloads */
+       if (protocol == htons(ETH_P_IP))
+               tx_flags |= I40E_TX_FLAGS_IPV4;
+       else if (protocol == htons(ETH_P_IPV6))
+               tx_flags |= I40E_TX_FLAGS_IPV6;
+
+       tso = i40e_tso(tx_ring, skb, &hdr_len,
+                      &cd_type_cmd_tso_mss, &cd_tunneling);
+
+       if (tso < 0)
+               goto out_drop;
+       else if (tso)
+               tx_flags |= I40E_TX_FLAGS_TSO;
+
+       if (i40e_chk_linearize(skb, tx_flags)) {
+               if (skb_linearize(skb))
+                       goto out_drop;
+               tx_ring->tx_stats.tx_linearize++;
+       }
+       skb_tx_timestamp(skb);
+
+#ifdef HAVE_PTP_1588_CLOCK
+       tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
+
+       if (tsyn)
+               tx_flags |= I40E_TX_FLAGS_TSYN;
+
+#endif /* HAVE_PTP_1588_CLOCK */
+       /* always enable CRC insertion offload */
+       td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+       /* Always offload the checksum, since it's in the data descriptor */
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               tx_flags |= I40E_TX_FLAGS_CSUM;
+
+               i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+                                   tx_ring, &cd_tunneling);
+       }
+
+       i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+                          cd_tunneling, cd_l2tag2);
+
+       /* Add Flow Director ATR if it's enabled.
+        *
+        * NOTE: this must always be directly before the data descriptor.
+        */
+       i40e_atr(tx_ring, skb, tx_flags, protocol);
+
+       i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+                   td_cmd, td_offset);
+
+#ifndef HAVE_TRANS_START_IN_QUEUE
+       tx_ring->netdev->trans_start = jiffies;
+#endif
+       return NETDEV_TX_OK;
+
+out_drop:
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+/**
+ * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb:    send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
+
+       /* hardware can't handle really short frames; hardware padding works
+        * beyond this point
+        */
+       if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+               if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+                       return NETDEV_TX_OK;
+               skb->len = I40E_MIN_TX_LEN;
+               skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+       }
+
+       return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_txrx.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_txrx.h
new file mode 100644 (file)
index 0000000..dc4ed1d
--- /dev/null
@@ -0,0 +1,397 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
+/* Interrupt Throttling and Rate Limiting Goodies */
+
+#define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
+#define I40E_ITR_100K              0x0005
+#define I40E_ITR_50K               0x000A
+#define I40E_ITR_20K               0x0019
+#define I40E_ITR_18K               0x001B
+#define I40E_ITR_8K                0x003E
+#define I40E_ITR_4K                0x007A
+#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
+#define I40E_ITR_RX_DEF            I40E_ITR_20K
+#define I40E_ITR_TX_DEF            I40E_ITR_20K
+#define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
+#define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK      256
+#define ITR_TO_REG(setting) (((setting) & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) ((itr_reg) << 1)
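+/* e.g. a dynamic 20 usec setting stored as 0x8014 maps to register value
+ * 0x000A via ITR_TO_REG, since the register counts 2 usec units
+ */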
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA                  BIT(6)
+#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
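+/* e.g. INTRL_USEC_TO_REG(20) == (20 >> 2) | INTRL_ENA == 0x45, and
+ * INTRL_REG_TO_USEC(0x45) == (0x05 << 2) == 20 usec round-trips it
+ */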
+#define I40E_INTRL_8K              125     /* 8000 ints/sec */
+#define I40E_INTRL_62K             16      /* 62500 ints/sec */
+#define I40E_INTRL_83K             12      /* 83333 ints/sec */
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+/* this enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere in the manual
+ * mentioning ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
+ * register but instead is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+       I40E_IDX_ITR0 = 0,
+       I40E_IDX_ITR1 = 1,
+       I40E_IDX_ITR2 = 2,
+       I40E_ITR_NONE = 3       /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR    I40E_IDX_ITR0
+#define I40E_TX_ITR    I40E_IDX_ITR1
+#define I40E_PE_ITR    I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define i40e_pf_get_default_rss_hena(pf) I40E_DEFAULT_RSS_HENA
+
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512   512    /* Used for packet split */
+#define I40E_RXBUFFER_2048  2048
+#define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096  4096
+#define I40E_RXBUFFER_8192  8192
+#define I40E_MAX_RXBUFFER   9728  /* largest size for a single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE  I40E_RXBUFFER_512
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40E_RX_BUFFER_WRITE   16      /* Must be power of 2 */
+#define I40E_RX_INCREMENT(r, i) \
+       do {                                    \
+               (i)++;                          \
+               if ((i) == (r)->count)          \
+                       (i) = 0;                \
+               (r)->next_to_clean = (i);       \
+       } while (0)
+
+#define I40E_RX_NEXT_DESC(r, i, n)             \
+       do {                                    \
+               (i)++;                          \
+               if ((i) == (r)->count)          \
+                       i = 0;                  \
+               (n) = I40E_RX_DESC((r), (i));   \
+       } while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)            \
+       do {                                            \
+               I40E_RX_NEXT_DESC((r), (i), (n));       \
+               prefetch((n));                          \
+       } while (0)
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MAX_BUFFER_TXD    8
+#define I40E_MIN_TX_LEN                17
+#define I40E_MAX_DATA_PER_TXD  8192
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define I40E_MIN_DESC_PENDING   4
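+/* e.g. a 9000 byte fragment needs TXD_USE_COUNT(9000) == 2 descriptors,
+ * since each data descriptor covers at most I40E_MAX_DATA_PER_TXD bytes
+ */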
+
+#define I40E_TX_FLAGS_CSUM             BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN          BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN          BIT(2)
+#define I40E_TX_FLAGS_TSO              BIT(3)
+#define I40E_TX_FLAGS_IPV4             BIT(4)
+#define I40E_TX_FLAGS_IPV6             BIT(5)
+#define I40E_TX_FLAGS_FCCRC            BIT(6)
+#define I40E_TX_FLAGS_FSO              BIT(7)
+#ifdef HAVE_PTP_1588_CLOCK
+#define I40E_TX_FLAGS_TSYN             BIT(8)
+#endif /* HAVE_PTP_1588_CLOCK */
+#define I40E_TX_FLAGS_FD_SB            BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     BIT(10)
+#define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
+#define I40E_TX_FLAGS_VLAN_SHIFT       16
+
+struct i40e_tx_buffer {
+       struct i40e_tx_desc *next_to_watch;
+       union {
+               struct sk_buff *skb;
+               void *raw_buf;
+       };
+       unsigned int bytecount;
+       unsigned short gso_segs;
+
+       DEFINE_DMA_UNMAP_ADDR(dma);
+       DEFINE_DMA_UNMAP_LEN(len);
+       u32 tx_flags;
+};
+
+struct i40e_rx_buffer {
+       struct sk_buff *skb;
+       void *hdr_buf;
+       dma_addr_t dma;
+       struct page *page;
+       dma_addr_t page_dma;
+       unsigned int page_offset;
+};
+
+struct i40e_queue_stats {
+       u64 packets;
+       u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
+       u64 restart_queue;
+       u64 tx_busy;
+       u64 tx_done_old;
+       u64 tx_linearize;
+};
+
+struct i40e_rx_queue_stats {
+       u64 non_eop_descs;
+       u64 alloc_page_failed;
+       u64 alloc_buff_failed;
+};
+
+enum i40e_ring_state_t {
+       __I40E_TX_FDIR_INIT_DONE,
+       __I40E_TX_XPS_INIT_DONE,
+       __I40E_TX_DETECT_HANG,
+       __I40E_HANG_CHECK_ARMED,
+       __I40E_RX_PS_ENABLED,
+       __I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+       test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+       set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+       clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+       test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+       set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+       clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+       test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+       set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+       clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+       struct i40e_ring *next;         /* pointer to next ring in q_vector */
+       void *desc;                     /* Descriptor ring memory */
+       struct device *dev;             /* Used for DMA mapping */
+       struct net_device *netdev;      /* netdev ring maps to */
+       union {
+               struct i40e_tx_buffer *tx_bi;
+               struct i40e_rx_buffer *rx_bi;
+       };
+       unsigned long state;
+       u16 queue_index;                /* Queue number of ring */
+       u8 dcb_tc;                      /* Traffic class of ring */
+       u8 __iomem *tail;
+
+       u16 count;                      /* Number of descriptors */
+       u16 reg_idx;                    /* HW register index of the ring */
+       u16 rx_hdr_len;
+       u16 rx_buf_len;
+       u8  dtype;
+#define I40E_RX_DTYPE_NO_SPLIT      0
+#define I40E_RX_DTYPE_HEADER_SPLIT  1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
+       u8  hsplit;
+#define I40E_RX_SPLIT_L2      0x1
+#define I40E_RX_SPLIT_IP      0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP    0x8
+
+       /* used in interrupt processing */
+       u16 next_to_use;
+       u16 next_to_clean;
+
+       u8 atr_sample_rate;
+       u8 atr_count;
+
+#ifdef HAVE_PTP_1588_CLOCK
+       unsigned long last_rx_timestamp;
+
+#endif /* HAVE_PTP_1588_CLOCK */
+       bool ring_active;               /* is ring online or not */
+       bool arm_wb;            /* do something to arm write back */
+       u16  packet_stride;
+
+       /* stats structs */
+       struct i40e_queue_stats stats;
+#ifdef HAVE_NDO_GET_STATS64
+       struct u64_stats_sync syncp;
+#endif
+       union {
+               struct i40e_tx_queue_stats tx_stats;
+               struct i40e_rx_queue_stats rx_stats;
+       };
+
+       unsigned int size;              /* length of descriptor ring in bytes */
+       dma_addr_t dma;                 /* physical address of ring */
+
+       struct i40e_vsi *vsi;           /* Backreference to associated VSI */
+       struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+       struct rcu_head rcu;            /* to avoid race on free */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+       I40E_LOWEST_LATENCY = 0,
+       I40E_LOW_LATENCY = 1,
+       I40E_BULK_LATENCY = 2,
+       I40E_ULTRA_LATENCY = 3,
+};
+
+struct i40e_ring_container {
+       /* array of pointers to rings */
+       struct i40e_ring *ring;
+       unsigned int total_bytes;       /* total bytes processed this int */
+       unsigned int total_packets;     /* total packets processed this int */
+       u16 count;
+       enum i40e_latency_range latency_range;
+       u16 itr;
+};
+
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+       for (pos = (head).ring; pos != NULL; pos = pos->next)
+
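+/* busy-poll compatibility stubs: these collapse to no-ops or constant
+ * returns so callers can invoke them unconditionally (apparently for
+ * kernels without busy-poll support)
+ */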
+#define napi_hash_del(n)
+#define napi_hash_add(n)
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+                                   struct napi_struct *napi)
+{
+}
+
+static inline void i40e_qv_init_lock(struct i40e_q_vector *q_vector)
+{
+}
+
+static inline bool i40e_qv_lock_napi(struct i40e_q_vector *q_vector)
+{
+       return true;
+}
+
+static inline bool i40e_qv_unlock_napi(struct i40e_q_vector *q_vector)
+{
+       return false;
+}
+
+static inline bool i40e_qv_lock_poll(struct i40e_q_vector *q_vector)
+{
+       return false;
+}
+
+static inline bool i40e_qv_unlock_poll(struct i40e_q_vector *q_vector)
+{
+       return false;
+}
+
+static inline bool i40e_qv_busy_polling(struct i40e_q_vector *q_vector)
+{
+       return false;
+}
+
+static inline bool i40e_qv_disable(struct i40e_q_vector *q_vector)
+{
+       return true;
+}
+
+void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_headers(struct i40e_ring *rxr);
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+#if !defined(HAVE_NET_DEVICE_OPS) && defined(HAVE_NETDEV_SELECT_QUEUE)
+extern u16 i40e_lan_select_queue(struct net_device *netdev,
+                                struct sk_buff *skb);
+#endif
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40e_free_tx_resources(struct i40e_ring *tx_ring);
+void i40e_free_rx_resources(struct i40e_ring *rx_ring);
+int i40e_napi_poll(struct napi_struct *napi, int budget);
+#ifdef I40E_FCOE
+void i40e_tx_map(struct i40e_ring *, struct sk_buff *, struct i40e_tx_buffer *,
+                u32, const u8, u32, u32);
+int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
+int i40e_xmit_descriptor_count(struct sk_buff *, struct i40e_ring *);
+int i40e_tx_prepare_vlan_flags(struct sk_buff *, struct i40e_ring *,
+                              u32 *flags);
+#endif
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
+u32 i40e_get_tx_pending(struct i40e_ring *ring);
+
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
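+       /* the head write-back slot lives one entry past the last descriptor */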
+       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+       return le32_to_cpu(*(volatile __le32 *)head);
+}
+#endif /* _I40E_TXRX_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_type.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_type.h
new file mode 100644 (file)
index 0000000..3e04d4e
--- /dev/null
@@ -0,0 +1,1730 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_devids.h"
+
+#define UNREFERENCED_XPARAMETER
+
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) ((mask) << (shift))
+
+#define I40E_MAX_PF                    16
+#define I40E_MAX_PF_VSI                        64
+#define I40E_MAX_PF_QP                 128
+#define I40E_MAX_VSI_QP                        16
+#define I40E_MAX_VF_VSI                        3
+#define I40E_MAX_CHAINED_RX_BUFFERS    5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS  16
+
+/* something less than 1 minute */
+#define I40E_HEARTBEAT_TIMEOUT         (HZ * 50)
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT           18000
+
+/* Check whether address is multicast. */
+#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address)     \
+       ((((u8 *)(address))[0] == ((u8)0xff)) && \
+       (((u8 *)(address))[1] == ((u8)0xff)))
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time)         ((time) * 1000)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+/* Data type manipulation macros. */
+
+
+#define I40E_HI_BYTE(x)                ((u8)(((x) >> 8) & 0xFF))
+#define I40E_LO_BYTE(x)                ((u8)((x) & 0xFF))
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE        8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define I40E_REQ_RX_DESCRIPTOR_MULTIPLE        32
+
+#define I40E_DESC_UNUSED(R)    \
+       ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+       (R)->next_to_clean - (R)->next_to_use - 1)
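+/* e.g. count == 512, next_to_clean == 4, next_to_use == 10 yields
+ * 512 + 4 - 10 - 1 == 505 free slots; one slot always stays unused so a
+ * full ring can be told apart from an empty one
+ */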
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE  0x0
+#define I40E_QTX_CTL_VM_QUEUE  0x1
+#define I40E_QTX_CTL_PF_QUEUE  0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+       I40E_DEBUG_INIT                 = 0x00000001,
+       I40E_DEBUG_RELEASE              = 0x00000002,
+
+       I40E_DEBUG_LINK                 = 0x00000010,
+       I40E_DEBUG_PHY                  = 0x00000020,
+       I40E_DEBUG_HMC                  = 0x00000040,
+       I40E_DEBUG_NVM                  = 0x00000080,
+       I40E_DEBUG_LAN                  = 0x00000100,
+       I40E_DEBUG_FLOW                 = 0x00000200,
+       I40E_DEBUG_DCB                  = 0x00000400,
+       I40E_DEBUG_DIAG                 = 0x00000800,
+       I40E_DEBUG_FD                   = 0x00001000,
+
+       I40E_DEBUG_AQ_MESSAGE           = 0x01000000,
+       I40E_DEBUG_AQ_DESCRIPTOR        = 0x02000000,
+       I40E_DEBUG_AQ_DESC_BUFFER       = 0x04000000,
+       I40E_DEBUG_AQ_COMMAND           = 0x06000000,
+       I40E_DEBUG_AQ                   = 0x0F000000,
+
+       I40E_DEBUG_USER                 = 0xF0000000,
+
+       I40E_DEBUG_ALL                  = 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_STATUS           0xB2
+#define I40E_PCI_LINK_WIDTH            0x3F0
+#define I40E_PCI_LINK_WIDTH_1          0x10
+#define I40E_PCI_LINK_WIDTH_2          0x20
+#define I40E_PCI_LINK_WIDTH_4          0x40
+#define I40E_PCI_LINK_WIDTH_8          0x80
+#define I40E_PCI_LINK_SPEED            0xF
+#define I40E_PCI_LINK_SPEED_2500       0x1
+#define I40E_PCI_LINK_SPEED_5000       0x2
+#define I40E_PCI_LINK_SPEED_8000       0x3
+
+/* Memory types */
+enum i40e_memset_type {
+       I40E_NONDMA_MEM = 0,
+       I40E_DMA_MEM
+};
+
+/* Memcpy types */
+enum i40e_memcpy_type {
+       I40E_NONDMA_TO_NONDMA = 0,
+       I40E_NONDMA_TO_DMA,
+       I40E_DMA_TO_DMA,
+       I40E_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with.  This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed.  For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+       I40E_MAC_UNKNOWN = 0,
+       I40E_MAC_X710,
+       I40E_MAC_XL710,
+       I40E_MAC_VF,
+       I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+       I40E_MEDIA_TYPE_UNKNOWN = 0,
+       I40E_MEDIA_TYPE_FIBER,
+       I40E_MEDIA_TYPE_BASET,
+       I40E_MEDIA_TYPE_BACKPLANE,
+       I40E_MEDIA_TYPE_CX4,
+       I40E_MEDIA_TYPE_DA,
+       I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+       I40E_FC_NONE = 0,
+       I40E_FC_RX_PAUSE,
+       I40E_FC_TX_PAUSE,
+       I40E_FC_FULL,
+       I40E_FC_PFC,
+       I40E_FC_DEFAULT
+};
+
+enum i40e_set_fc_aq_failures {
+       I40E_SET_FC_AQ_FAIL_NONE = 0,
+       I40E_SET_FC_AQ_FAIL_GET = 1,
+       I40E_SET_FC_AQ_FAIL_SET = 2,
+       I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+       I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
+enum i40e_vsi_type {
+       I40E_VSI_MAIN   = 0,
+       I40E_VSI_VMDQ1  = 1,
+       I40E_VSI_VMDQ2  = 2,
+       I40E_VSI_CTRL   = 3,
+       I40E_VSI_FCOE   = 4,
+       I40E_VSI_MIRROR = 5,
+       I40E_VSI_SRIOV  = 6,
+       I40E_VSI_FDIR   = 7,
+       I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+       I40E_QUEUE_TYPE_RX = 0,
+       I40E_QUEUE_TYPE_TX,
+       I40E_QUEUE_TYPE_PE_CEQ,
+       I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+       enum i40e_aq_phy_type phy_type;
+       enum i40e_aq_link_speed link_speed;
+       u8 link_info;
+       u8 an_info;
+       u8 ext_info;
+       u8 loopback;
+       /* is Link Status Event notification to SW enabled */
+       bool lse_enable;
+       u16 max_frame_size;
+       bool crc_enable;
+       u8 pacing;
+       u8 requested_speeds;
+       u8 module_type[3];
+       /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP           0x03
+#define I40E_MODULE_TYPE_QSFP          0x0D
+       /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE    0x01
+#define I40E_MODULE_TYPE_40G_LR4       0x02
+#define I40E_MODULE_TYPE_40G_SR4       0x04
+#define I40E_MODULE_TYPE_40G_CR4       0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR   0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR   0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM  0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER   0x80
+       /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX   0x01
+#define I40E_MODULE_TYPE_1000BASE_LX   0x02
+#define I40E_MODULE_TYPE_1000BASE_CX   0x04
+#define I40E_MODULE_TYPE_1000BASE_T    0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+       I40E_CAP_PHY_TYPE_SGMII                 = BIT(I40E_PHY_TYPE_SGMII),
+       I40E_CAP_PHY_TYPE_1000BASE_KX           = BIT(I40E_PHY_TYPE_1000BASE_KX),
+       I40E_CAP_PHY_TYPE_10GBASE_KX4           = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+       I40E_CAP_PHY_TYPE_10GBASE_KR            = BIT(I40E_PHY_TYPE_10GBASE_KR),
+       I40E_CAP_PHY_TYPE_40GBASE_KR4           = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+       I40E_CAP_PHY_TYPE_XAUI                  = BIT(I40E_PHY_TYPE_XAUI),
+       I40E_CAP_PHY_TYPE_XFI                   = BIT(I40E_PHY_TYPE_XFI),
+       I40E_CAP_PHY_TYPE_SFI                   = BIT(I40E_PHY_TYPE_SFI),
+       I40E_CAP_PHY_TYPE_XLAUI                 = BIT(I40E_PHY_TYPE_XLAUI),
+       I40E_CAP_PHY_TYPE_XLPPI                 = BIT(I40E_PHY_TYPE_XLPPI),
+       I40E_CAP_PHY_TYPE_40GBASE_CR4_CU        = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_CR1_CU        = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_AOC           = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+       I40E_CAP_PHY_TYPE_40GBASE_AOC           = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+       I40E_CAP_PHY_TYPE_100BASE_TX            = BIT(I40E_PHY_TYPE_100BASE_TX),
+       I40E_CAP_PHY_TYPE_1000BASE_T            = BIT(I40E_PHY_TYPE_1000BASE_T),
+       I40E_CAP_PHY_TYPE_10GBASE_T             = BIT(I40E_PHY_TYPE_10GBASE_T),
+       I40E_CAP_PHY_TYPE_10GBASE_SR            = BIT(I40E_PHY_TYPE_10GBASE_SR),
+       I40E_CAP_PHY_TYPE_10GBASE_LR            = BIT(I40E_PHY_TYPE_10GBASE_LR),
+       I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU       = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+       I40E_CAP_PHY_TYPE_10GBASE_CR1           = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+       I40E_CAP_PHY_TYPE_40GBASE_CR4           = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+       I40E_CAP_PHY_TYPE_40GBASE_SR4           = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+       I40E_CAP_PHY_TYPE_40GBASE_LR4           = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+       I40E_CAP_PHY_TYPE_1000BASE_SX           = BIT(I40E_PHY_TYPE_1000BASE_SX),
+       I40E_CAP_PHY_TYPE_1000BASE_LX           = BIT(I40E_PHY_TYPE_1000BASE_LX),
+       I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL    = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+       I40E_CAP_PHY_TYPE_20GBASE_KR2           = BIT(I40E_PHY_TYPE_20GBASE_KR2)
+};
+
+struct i40e_phy_info {
+       struct i40e_link_status link_info;
+       struct i40e_link_status link_info_old;
+       bool get_link_info;
+       enum i40e_media_type media_type;
+       /* all the phy types the NVM is capable of */
+       u32 phy_types;
+};
+
+#define I40E_HW_CAP_MAX_GPIO                   30
+#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO                0
+#define I40E_HW_CAP_MDIO_PORT_MODE_I2C         1
+
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+       u32  switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB                0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD      0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD  0x3
+
+       u32  management_mode;
+       u32  npar_enable;
+       u32  os2bmc;
+       u32  valid_functions;
+       bool sr_iov_1_1;
+       bool vmdq;
+       bool evb_802_1_qbg; /* Edge Virtual Bridging */
+       bool evb_802_1_qbh; /* Bridge Port Extension */
+       bool dcb;
+       bool fcoe;
+       bool iscsi; /* Indicates iSCSI enabled */
+       bool flex10_enable;
+       bool flex10_capable;
+       u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN       0x0
+#define I40E_FLEX10_MODE_DCC           0x1
+#define I40E_FLEX10_MODE_DCI           0x2
+
+       u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR   0x1
+#define I40E_FLEX10_STATUS_VC_MODE     0x2
+
+       bool mgmt_cem;
+       bool ieee_1588;
+       bool iwarp;
+       bool fd;
+       u32 fd_filters_guaranteed;
+       u32 fd_filters_best_effort;
+       bool rss;
+       u32 rss_table_size;
+       u32 rss_table_entry_width;
+       bool led[I40E_HW_CAP_MAX_GPIO];
+       bool sdp[I40E_HW_CAP_MAX_GPIO];
+       u32 nvm_image_type;
+       u32 num_flow_director_filters;
+       u32 num_vfs;
+       u32 vf_base_id;
+       u32 num_vsis;
+       u32 num_rx_qp;
+       u32 num_tx_qp;
+       u32 base_queue;
+       u32 num_msix_vectors;
+       u32 num_msix_vectors_vf;
+       u32 led_pin_num;
+       u32 sdp_pin_num;
+       u32 mdio_port_num;
+       u32 mdio_port_mode;
+       u8 rx_buf_chain_len;
+       u32 enabled_tcmap;
+       u32 maxtc;
+       u64 wr_csr_prot;
+};
+
+struct i40e_mac_info {
+       enum i40e_mac_type type;
+       u8 addr[ETH_ALEN];
+       u8 perm_addr[ETH_ALEN];
+       u8 san_addr[ETH_ALEN];
+       u8 port_addr[ETH_ALEN];
+       u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+       I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+       I40E_RESOURCE_READ = 1,
+       I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+       u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
+       u32 timeout;              /* [ms] */
+       u16 sr_size;              /* Shadow RAM size in words */
+       bool blank_nvm_mode;      /* is NVM empty (no FW present)*/
+       u16 version;              /* NVM package version */
+       u32 eetrack;              /* NVM data version */
+       u32 oem_ver;              /* OEM version info */
+};
+
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+       I40E_NVMUPD_INVALID,
+       I40E_NVMUPD_READ_CON,
+       I40E_NVMUPD_READ_SNT,
+       I40E_NVMUPD_READ_LCB,
+       I40E_NVMUPD_READ_SA,
+       I40E_NVMUPD_WRITE_ERA,
+       I40E_NVMUPD_WRITE_CON,
+       I40E_NVMUPD_WRITE_SNT,
+       I40E_NVMUPD_WRITE_LCB,
+       I40E_NVMUPD_WRITE_SA,
+       I40E_NVMUPD_CSUM_CON,
+       I40E_NVMUPD_CSUM_SA,
+       I40E_NVMUPD_CSUM_LCB,
+       I40E_NVMUPD_STATUS,
+       I40E_NVMUPD_EXEC_AQ,
+       I40E_NVMUPD_GET_AQ_RESULT,
+};
+
+enum i40e_nvmupd_state {
+       I40E_NVMUPD_STATE_INIT,
+       I40E_NVMUPD_STATE_READING,
+       I40E_NVMUPD_STATE_WRITING,
+       I40E_NVMUPD_STATE_INIT_WAIT,
+       I40E_NVMUPD_STATE_WRITE_WAIT,
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code.  Where is the right file?
+ */
+#define I40E_NVM_READ  0xB
+#define I40E_NVM_WRITE 0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT   8
+#define I40E_NVM_TRANS_MASK    (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_CON           0x0
+#define I40E_NVM_SNT           0x1
+#define I40E_NVM_LCB           0x2
+#define I40E_NVM_SA            (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA           0x4
+#define I40E_NVM_CSUM          0x8
+#define I40E_NVM_EXEC          0xf
+
+#define I40E_NVM_ADAPT_SHIFT   16
+#define I40E_NVM_ADAPT_MASK    (0xffffULL << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA   4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct i40e_nvm_access {
+       u32 command;
+       u32 config;
+       u32 offset;     /* in bytes */
+       u32 data_size;  /* in bytes */
+       u8 data[1];
+};
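+
+/* Editorial note: a minimal usage sketch, not part of the original
+ * submission. It shows how the config word of i40e_nvm_access is
+ * expected to be unpacked with the masks above; the helper names are
+ * hypothetical.
+ */
+static INLINE u8 i40e_example_nvmupd_transaction(struct i40e_nvm_access *cmd)
+{
+       /* bits 8..11 select the transaction type (CON/SNT/LCB/SA/...) */
+       return (u8)((cmd->config & I40E_NVM_TRANS_MASK) >>
+                   I40E_NVM_TRANS_SHIFT);
+}
+
+static INLINE u8 i40e_example_nvmupd_module(struct i40e_nvm_access *cmd)
+{
+       /* bits 0..7 carry the module pointer */
+       return (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK);
+}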
+
+/* PCI bus types */
+enum i40e_bus_type {
+       i40e_bus_type_unknown = 0,
+       i40e_bus_type_pci,
+       i40e_bus_type_pcix,
+       i40e_bus_type_pci_express,
+       i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+       i40e_bus_speed_unknown  = 0,
+       i40e_bus_speed_33       = 33,
+       i40e_bus_speed_66       = 66,
+       i40e_bus_speed_100      = 100,
+       i40e_bus_speed_120      = 120,
+       i40e_bus_speed_133      = 133,
+       i40e_bus_speed_2500     = 2500,
+       i40e_bus_speed_5000     = 5000,
+       i40e_bus_speed_8000     = 8000,
+       i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+       i40e_bus_width_unknown  = 0,
+       i40e_bus_width_pcie_x1  = 1,
+       i40e_bus_width_pcie_x2  = 2,
+       i40e_bus_width_pcie_x4  = 4,
+       i40e_bus_width_pcie_x8  = 8,
+       i40e_bus_width_32       = 32,
+       i40e_bus_width_64       = 64,
+       i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+       enum i40e_bus_speed speed;
+       enum i40e_bus_width width;
+       enum i40e_bus_type type;
+
+       u16 func;
+       u16 device;
+       u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+       enum i40e_fc_mode current_mode; /* FC mode in effect */
+       enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS         8
+#define I40E_MAX_USER_PRIORITY         8
+#define I40E_DCBX_MAX_APPS             32
+#define I40E_LLDPDU_SIZE               1500
+#define I40E_TLV_STATUS_OPER           0x1
+#define I40E_TLV_STATUS_SYNC           0x2
+#define I40E_TLV_STATUS_ERR            0x4
+#define I40E_CEE_OPER_MAX_APPS         3
+#define I40E_APP_PROTOID_FCOE          0x8906
+#define I40E_APP_PROTOID_ISCSI         0x0cbc
+#define I40E_APP_PROTOID_FIP           0x8914
+#define I40E_APP_SEL_ETHTYPE           0x1
+#define I40E_APP_SEL_TCPIP             0x2
+#define I40E_CEE_APP_SEL_ETHTYPE       0x0
+#define I40E_CEE_APP_SEL_TCPIP         0x1
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct i40e_dcb_ets_config {
+       u8 willing;
+       u8 cbs;
+       u8 maxtcs;
+       u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+       u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+       u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct i40e_dcb_pfc_config {
+       u8 willing;
+       u8 mbc;
+       u8 pfccap;
+       u8 pfcenable;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct i40e_dcb_app_priority_table {
+       u8  priority;
+       u8  selector;
+       u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+       u8  dcbx_mode;
+#define I40E_DCBX_MODE_CEE     0x1
+#define I40E_DCBX_MODE_IEEE    0x2
+       u8  app_mode;
+#define I40E_DCBX_APPS_NON_WILLING     0x1
+       u32 numapps;
+       u32 tlv_status; /* CEE mode TLV status */
+       struct i40e_dcb_ets_config etscfg;
+       struct i40e_dcb_ets_config etsrec;
+       struct i40e_dcb_pfc_config pfc;
+       struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+       u8 __iomem *hw_addr;
+       void *back;
+
+       /* subsystem structs */
+       struct i40e_phy_info phy;
+       struct i40e_mac_info mac;
+       struct i40e_bus_info bus;
+       struct i40e_nvm_info nvm;
+       struct i40e_fc_info fc;
+
+       /* pci info */
+       u16 device_id;
+       u16 vendor_id;
+       u16 subsystem_device_id;
+       u16 subsystem_vendor_id;
+       u8 revision_id;
+       u8 port;
+       bool adapter_stopped;
+
+       /* capabilities for entire device and PCI func */
+       struct i40e_hw_capabilities dev_caps;
+       struct i40e_hw_capabilities func_caps;
+
+       /* Flow Director shared filter space */
+       u16 fdir_shared_filter_count;
+
+       /* device profile info */
+       u8  pf_id;
+       u16 main_vsi_seid;
+
+       /* for multi-function MACs */
+       u16 partition_id;
+       u16 num_partitions;
+       u16 num_ports;
+
+       /* Closest numa node to the device */
+       u16 numa_node;
+
+       /* Admin Queue info */
+       struct i40e_adminq_info aq;
+
+       /* state of nvm update process */
+       enum i40e_nvmupd_state nvmupd_state;
+       struct i40e_aq_desc nvm_wb_desc;
+       struct i40e_virt_mem nvm_buff;
+
+       /* HMC info */
+       struct i40e_hmc_info hmc; /* HMC info struct */
+
+       /* LLDP/DCBX Status */
+       u16 dcbx_status;
+
+       /* DCBX info */
+       struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+       struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+       struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+       /* debug mask */
+       u32 debug_mask;
+       char err_str[16];
+};
+
+static INLINE bool i40e_is_vf(struct i40e_hw *hw)
+{
+       return hw->mac.type == I40E_MAC_VF;
+}
+
+struct i40e_driver_version {
+       u8 major_version;
+       u8 minor_version;
+       u8 build_version;
+       u8 subbuild_version;
+       u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+       struct {
+               __le64 pkt_addr; /* Packet buffer address */
+               __le64 hdr_addr; /* Header buffer address */
+       } read;
+       struct {
+               struct {
+                       struct {
+                               union {
+                                       __le16 mirroring_status;
+                                       __le16 fcoe_ctx_id;
+                               } mirr_fcoe;
+                               __le16 l2tag1;
+                       } lo_dword;
+                       union {
+                               __le32 rss; /* RSS Hash */
+                               __le32 fd_id; /* Flow director filter id */
+                               __le32 fcoe_param; /* FCoE DDP Context id */
+                       } hi_dword;
+               } qword0;
+               struct {
+                       /* ext status/error/pktype/length */
+                       __le64 status_error_len;
+               } qword1;
+       } wb;  /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+       struct {
+               __le64  pkt_addr; /* Packet buffer address */
+               __le64  hdr_addr; /* Header buffer address */
+                       /* bit 0 of hdr_buffer_addr is DD bit */
+               __le64  rsvd1;
+               __le64  rsvd2;
+       } read;
+       struct {
+               struct {
+                       struct {
+                               union {
+                                       __le16 mirroring_status;
+                                       __le16 fcoe_ctx_id;
+                               } mirr_fcoe;
+                               __le16 l2tag1;
+                       } lo_dword;
+                       union {
+                               __le32 rss; /* RSS Hash */
+                               __le32 fcoe_param; /* FCoE DDP Context id */
+                               /* Flow director filter id in case of
+                                * Programming status desc WB
+                                */
+                               __le32 fd_id;
+                       } hi_dword;
+               } qword0;
+               struct {
+                       /* status/error/pktype/length */
+                       __le64 status_error_len;
+               } qword1;
+               struct {
+                       __le16 ext_status; /* extended status */
+                       __le16 rsvd;
+                       __le16 l2tag2_1;
+                       __le16 l2tag2_2;
+               } qword2;
+               struct {
+                       union {
+                               __le32 flex_bytes_lo;
+                               __le32 pe_status;
+                       } lo_dword;
+                       union {
+                               __le32 flex_bytes_hi;
+                               __le32 fd_id;
+                       } hi_dword;
+               } qword3;
+       } wb;  /* writeback */
+};
+
+#define I40E_RXD_QW0_MIRROR_STATUS_SHIFT       8
+#define I40E_RXD_QW0_MIRROR_STATUS_MASK        (0x3FUL << \
+                                        I40E_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define I40E_RXD_QW0_FCOEINDX_SHIFT    0
+#define I40E_RXD_QW0_FCOEINDX_MASK     (0xFFFUL << \
+                                        I40E_RXD_QW0_FCOEINDX_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+       /* Note: These are predefined bit offsets */
+       I40E_RX_DESC_STATUS_DD_SHIFT            = 0,
+       I40E_RX_DESC_STATUS_EOF_SHIFT           = 1,
+       I40E_RX_DESC_STATUS_L2TAG1P_SHIFT       = 2,
+       I40E_RX_DESC_STATUS_L3L4P_SHIFT         = 3,
+       I40E_RX_DESC_STATUS_CRCP_SHIFT          = 4,
+       I40E_RX_DESC_STATUS_TSYNINDX_SHIFT      = 5, /* 2 BITS */
+       I40E_RX_DESC_STATUS_TSYNVALID_SHIFT     = 7,
+       I40E_RX_DESC_STATUS_RESERVED1_SHIFT     = 8,
+
+       I40E_RX_DESC_STATUS_UMBCAST_SHIFT       = 9, /* 2 BITS */
+       I40E_RX_DESC_STATUS_FLM_SHIFT           = 11,
+       I40E_RX_DESC_STATUS_FLTSTAT_SHIFT       = 12, /* 2 BITS */
+       I40E_RX_DESC_STATUS_LPBK_SHIFT          = 14,
+       I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT     = 15,
+       I40E_RX_DESC_STATUS_RESERVED2_SHIFT     = 16, /* 2 BITS */
+       I40E_RX_DESC_STATUS_UDP_0_SHIFT         = 18,
+       I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT      0
+#define I40E_RXD_QW1_STATUS_MASK       ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \
+                                        I40E_RXD_QW1_STATUS_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK      (0x3UL << \
+                                            I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK   BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT      I40E_RX_DESC_STATUS_UMBCAST_SHIFT
+#define I40E_RXD_QW1_STATUS_UMBCAST_MASK       (0x3UL << \
+                                        I40E_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+       I40E_RX_DESC_FLTSTAT_NO_DATA    = 0,
+       I40E_RX_DESC_FLTSTAT_RSV_FD_ID  = 1, /* 16byte desc? FD_ID : RSV */
+       I40E_RX_DESC_FLTSTAT_RSV        = 2,
+       I40E_RX_DESC_FLTSTAT_RSS_HASH   = 3,
+};
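+
+/* Editorial note: a hedged sketch, not in the original change, of how the
+ * FLTSTAT field gates the meaning of qword0's hi_dword in the writeback
+ * union above; the function name is hypothetical and qword1 is assumed
+ * to be the already byte-swapped status_error_len word.
+ */
+static INLINE u32 i40e_example_rx_hash(union i40e_32byte_rx_desc *rxd,
+                                      u64 qword1)
+{
+       u8 fltstat = (qword1 >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) & 0x3;
+
+       /* hi_dword holds the RSS hash only when FLTSTAT says so */
+       if (fltstat == I40E_RX_DESC_FLTSTAT_RSS_HASH)
+               return le32_to_cpu(rxd->wb.qword0.hi_dword.rss);
+       return 0;
+}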
+
+#define I40E_RXD_PACKET_TYPE_UNICAST   0
+#define I40E_RXD_PACKET_TYPE_MULTICAST 1
+#define I40E_RXD_PACKET_TYPE_BROADCAST 2
+#define I40E_RXD_PACKET_TYPE_MIRRORED  3
+
+#define I40E_RXD_QW1_ERROR_SHIFT       19
+#define I40E_RXD_QW1_ERROR_MASK                (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+       /* Note: These are predefined bit offsets */
+       I40E_RX_DESC_ERROR_RXE_SHIFT            = 0,
+       I40E_RX_DESC_ERROR_RECIPE_SHIFT         = 1,
+       I40E_RX_DESC_ERROR_HBO_SHIFT            = 2,
+       I40E_RX_DESC_ERROR_L3L4E_SHIFT          = 3, /* 3 BITS */
+       I40E_RX_DESC_ERROR_IPE_SHIFT            = 3,
+       I40E_RX_DESC_ERROR_L4E_SHIFT            = 4,
+       I40E_RX_DESC_ERROR_EIPE_SHIFT           = 5,
+       I40E_RX_DESC_ERROR_OVERSIZE_SHIFT       = 6,
+       I40E_RX_DESC_ERROR_PPRS_SHIFT           = 7
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+       I40E_RX_DESC_ERROR_L3L4E_NONE           = 0,
+       I40E_RX_DESC_ERROR_L3L4E_PROT           = 1,
+       I40E_RX_DESC_ERROR_L3L4E_FC             = 2,
+       I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR       = 3,
+       I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN      = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT       30
+#define I40E_RXD_QW1_PTYPE_MASK                (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+       I40E_RX_PTYPE_L2_RESERVED                       = 0,
+       I40E_RX_PTYPE_L2_MAC_PAY2                       = 1,
+       I40E_RX_PTYPE_L2_TIMESYNC_PAY2                  = 2,
+       I40E_RX_PTYPE_L2_FIP_PAY2                       = 3,
+       I40E_RX_PTYPE_L2_OUI_PAY2                       = 4,
+       I40E_RX_PTYPE_L2_MACCNTRL_PAY2                  = 5,
+       I40E_RX_PTYPE_L2_LLDP_PAY2                      = 6,
+       I40E_RX_PTYPE_L2_ECP_PAY2                       = 7,
+       I40E_RX_PTYPE_L2_EVB_PAY2                       = 8,
+       I40E_RX_PTYPE_L2_QCN_PAY2                       = 9,
+       I40E_RX_PTYPE_L2_EAPOL_PAY2                     = 10,
+       I40E_RX_PTYPE_L2_ARP                            = 11,
+       I40E_RX_PTYPE_L2_FCOE_PAY3                      = 12,
+       I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3               = 13,
+       I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3                = 14,
+       I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3                = 15,
+       I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA                = 16,
+       I40E_RX_PTYPE_L2_FCOE_VFT_PAY3                  = 17,
+       I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA                = 18,
+       I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY                 = 19,
+       I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP                 = 20,
+       I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER               = 21,
+       I40E_RX_PTYPE_GRENAT4_MAC_PAY3                  = 58,
+       I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4    = 87,
+       I40E_RX_PTYPE_GRENAT6_MAC_PAY3                  = 124,
+       I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4    = 153
+};
+
+struct i40e_rx_ptype_decoded {
+       u32 ptype:8;
+       u32 known:1;
+       u32 outer_ip:1;
+       u32 outer_ip_ver:1;
+       u32 outer_frag:1;
+       u32 tunnel_type:3;
+       u32 tunnel_end_prot:2;
+       u32 tunnel_end_frag:1;
+       u32 inner_prot:4;
+       u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+       I40E_RX_PTYPE_OUTER_L2  = 0,
+       I40E_RX_PTYPE_OUTER_IP  = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+       I40E_RX_PTYPE_OUTER_NONE        = 0,
+       I40E_RX_PTYPE_OUTER_IPV4        = 0,
+       I40E_RX_PTYPE_OUTER_IPV6        = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+       I40E_RX_PTYPE_NOT_FRAG  = 0,
+       I40E_RX_PTYPE_FRAG      = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+       I40E_RX_PTYPE_TUNNEL_NONE               = 0,
+       I40E_RX_PTYPE_TUNNEL_IP_IP              = 1,
+       I40E_RX_PTYPE_TUNNEL_IP_GRENAT          = 2,
+       I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC      = 3,
+       I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+       I40E_RX_PTYPE_TUNNEL_END_NONE   = 0,
+       I40E_RX_PTYPE_TUNNEL_END_IPV4   = 1,
+       I40E_RX_PTYPE_TUNNEL_END_IPV6   = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+       I40E_RX_PTYPE_INNER_PROT_NONE           = 0,
+       I40E_RX_PTYPE_INNER_PROT_UDP            = 1,
+       I40E_RX_PTYPE_INNER_PROT_TCP            = 2,
+       I40E_RX_PTYPE_INNER_PROT_SCTP           = 3,
+       I40E_RX_PTYPE_INNER_PROT_ICMP           = 4,
+       I40E_RX_PTYPE_INNER_PROT_TIMESYNC       = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+       I40E_RX_PTYPE_PAYLOAD_LAYER_NONE        = 0,
+       I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2        = 1,
+       I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3        = 2,
+       I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4        = 3,
+};
+
+#define I40E_RX_PTYPE_BIT_MASK         0x0FFFFFFF
+#define I40E_RX_PTYPE_SHIFT            56
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK  (0x3FFFULL << \
+                                        I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK  (0x7FFULL << \
+                                        I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT  63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK   BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+
+#define I40E_RXD_QW1_NEXTP_SHIFT       38
+#define I40E_RXD_QW1_NEXTP_MASK                (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
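+
+/* Editorial note: a hedged decode sketch, not part of the original
+ * submission, combining the QW1 status/error/ptype/length masks defined
+ * above; the function name is hypothetical.
+ */
+static INLINE void i40e_example_decode_rx_qword1(union i40e_32byte_rx_desc *rxd)
+{
+       u64 qword = le64_to_cpu(rxd->wb.qword1.status_error_len);
+       u32 status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                    I40E_RXD_QW1_STATUS_SHIFT;
+       u32 error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+                   I40E_RXD_QW1_ERROR_SHIFT;
+       u8 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+                  I40E_RXD_QW1_PTYPE_SHIFT;
+       u16 plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+                  I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+
+       /* DD (bit 0 of the status field) signals writeback completion */
+       (void)status; (void)error; (void)ptype; (void)plen;
+}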
+
+#define I40E_RXD_QW2_EXT_STATUS_SHIFT  0
+#define I40E_RXD_QW2_EXT_STATUS_MASK   (0xFFFFFUL << \
+                                        I40E_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+       /* Note: These are predefined bit offsets */
+       I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT   = 0,
+       I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT   = 1,
+       I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT    = 2, /* 2 BITS */
+       I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT    = 4, /* 2 BITS */
+       I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT   = 9,
+       I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+       I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT   = 11,
+};
+
+#define I40E_RXD_QW2_L2TAG2_SHIFT      0
+#define I40E_RXD_QW2_L2TAG2_MASK       (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT)
+
+#define I40E_RXD_QW2_L2TAG3_SHIFT      16
+#define I40E_RXD_QW2_L2TAG3_MASK       (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT)
+
+enum i40e_rx_desc_pe_status_bits {
+       /* Note: These are predefined bit offsets */
+       I40E_RX_DESC_PE_STATUS_QPID_SHIFT       = 0, /* 18 BITS */
+       I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT     = 0, /* 16 BITS */
+       I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT    = 16, /* 8 BITS */
+       I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT    = 24,
+       I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT   = 25,
+       I40E_RX_DESC_PE_STATUS_PORTV_SHIFT      = 26,
+       I40E_RX_DESC_PE_STATUS_URG_SHIFT        = 27,
+       I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT     = 28,
+       I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT      = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT          38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH                        0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT      2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK       (0x7UL << \
+                               I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT      0
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK       (0x7FFFUL << \
+                               I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT       19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK                (0x3FUL << \
+                               I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+       /* Note: These are predefined bit offsets */
+       I40E_RX_PROG_STATUS_DESC_DD_SHIFT       = 0,
+       I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT  = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+       I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS       = 1,
+       I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS  = 2,
+       I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS  = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+       /* Note: These are predefined bit offsets */
+       I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT      = 0,
+       I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT      = 1,
+       I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT    = 2,
+       I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT    = 3
+};
+
+#define I40E_TWO_BIT_MASK      0x3
+#define I40E_THREE_BIT_MASK    0x7
+#define I40E_FOUR_BIT_MASK     0xF
+#define I40E_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+       __le64 buffer_addr; /* Address of descriptor's data buf */
+       __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT       0
+#define I40E_TXD_QW1_DTYPE_MASK                (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+       I40E_TX_DESC_DTYPE_DATA         = 0x0,
+       I40E_TX_DESC_DTYPE_NOP          = 0x1, /* same as Context desc */
+       I40E_TX_DESC_DTYPE_CONTEXT      = 0x1,
+       I40E_TX_DESC_DTYPE_FCOE_CTX     = 0x2,
+       I40E_TX_DESC_DTYPE_FILTER_PROG  = 0x8,
+       I40E_TX_DESC_DTYPE_DDP_CTX      = 0x9,
+       I40E_TX_DESC_DTYPE_FLEX_DATA    = 0xB,
+       I40E_TX_DESC_DTYPE_FLEX_CTX_1   = 0xC,
+       I40E_TX_DESC_DTYPE_FLEX_CTX_2   = 0xD,
+       I40E_TX_DESC_DTYPE_DESC_DONE    = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK  (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+       I40E_TX_DESC_CMD_EOP                    = 0x0001,
+       I40E_TX_DESC_CMD_RS                     = 0x0002,
+       I40E_TX_DESC_CMD_ICRC                   = 0x0004,
+       I40E_TX_DESC_CMD_IL2TAG1                = 0x0008,
+       I40E_TX_DESC_CMD_DUMMY                  = 0x0010,
+       I40E_TX_DESC_CMD_IIPT_NONIP             = 0x0000, /* 2 BITS */
+       I40E_TX_DESC_CMD_IIPT_IPV6              = 0x0020, /* 2 BITS */
+       I40E_TX_DESC_CMD_IIPT_IPV4              = 0x0040, /* 2 BITS */
+       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM         = 0x0060, /* 2 BITS */
+       I40E_TX_DESC_CMD_FCOET                  = 0x0080,
+       I40E_TX_DESC_CMD_L4T_EOFT_UNK           = 0x0000, /* 2 BITS */
+       I40E_TX_DESC_CMD_L4T_EOFT_TCP           = 0x0100, /* 2 BITS */
+       I40E_TX_DESC_CMD_L4T_EOFT_SCTP          = 0x0200, /* 2 BITS */
+       I40E_TX_DESC_CMD_L4T_EOFT_UDP           = 0x0300, /* 2 BITS */
+       I40E_TX_DESC_CMD_L4T_EOFT_EOF_N         = 0x0000, /* 2 BITS */
+       I40E_TX_DESC_CMD_L4T_EOFT_EOF_T         = 0x0100, /* 2 BITS */
+       I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI        = 0x0200, /* 2 BITS */
+       I40E_TX_DESC_CMD_L4T_EOFT_EOF_A         = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT      16
+#define I40E_TXD_QW1_OFFSET_MASK       (0x3FFFFULL << \
+                                        I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+       /* Note: These are predefined bit offsets */
+       I40E_TX_DESC_LENGTH_MACLEN_SHIFT        = 0, /* 7 BITS */
+       I40E_TX_DESC_LENGTH_IPLEN_SHIFT         = 7, /* 7 BITS */
+       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT     = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define I40E_TXD_QW1_IPLEN_MASK  (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define I40E_TXD_QW1_L4LEN_MASK  (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define I40E_TXD_QW1_FCLEN_MASK  (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT   34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK    (0x3FFFULL << \
+                                        I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT      48
+#define I40E_TXD_QW1_L2TAG1_MASK       (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
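+
+/* Editorial note: a hypothetical helper, not in the original change,
+ * mirroring how a data descriptor's cmd_type_offset_bsz word is composed
+ * from the DTYPE/CMD/OFFSET/BUF_SZ/L2TAG1 fields defined above.
+ */
+static INLINE __le64 i40e_example_build_ctob(u32 td_cmd, u32 td_offset,
+                                            unsigned int size, u32 td_tag)
+{
+       return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+                          ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+                          ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+                          ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+                          ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}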
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+       __le32 tunneling_params;
+       __le16 l2tag2;
+       __le16 rsvd;
+       __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT   0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK    (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT     4
+#define I40E_TXD_CTX_QW1_CMD_MASK      (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+       I40E_TX_CTX_DESC_TSO            = 0x01,
+       I40E_TX_CTX_DESC_TSYN           = 0x02,
+       I40E_TX_CTX_DESC_IL2TAG2        = 0x04,
+       I40E_TX_CTX_DESC_IL2TAG2_IL2H   = 0x08,
+       I40E_TX_CTX_DESC_SWTCH_NOTAG    = 0x00,
+       I40E_TX_CTX_DESC_SWTCH_UPLINK   = 0x10,
+       I40E_TX_CTX_DESC_SWTCH_LOCAL    = 0x20,
+       I40E_TX_CTX_DESC_SWTCH_VSI      = 0x30,
+       I40E_TX_CTX_DESC_SWPE           = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK  (0x3FFFFULL << \
+                                        I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT     50
+#define I40E_TXD_CTX_QW1_MSS_MASK      (0x3FFFULL << \
+                                        I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT     50
+#define I40E_TXD_CTX_QW1_VSI_MASK      (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
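+
+/* Editorial note: a hedged sketch, not in the original change, of how a
+ * TSO context descriptor's type_cmd_tso_mss word is assembled from the
+ * shifts above; the function name is hypothetical.
+ */
+static INLINE __le64 i40e_example_build_tso_ctx(u32 tso_len, u32 mss)
+{
+       u64 qw1 = I40E_TX_DESC_DTYPE_CONTEXT;
+
+       qw1 |= (u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT;
+       qw1 |= (u64)tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT;
+       qw1 |= (u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT;
+       return cpu_to_le64(qw1);
+}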
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT  0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK   (0x3ULL << \
+                                        I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+       I40E_TX_CTX_EXT_IP_NONE         = 0x0,
+       I40E_TX_CTX_EXT_IP_IPV6         = 0x1,
+       I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+       I40E_TX_CTX_EXT_IP_IPV4         = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT       2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK        (0x3FULL << \
+                                        I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT    9
+#define I40E_TXD_CTX_QW0_NATT_MASK     (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING     BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING     (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT       11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK        BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST      I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT  12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK   (0x7FULL << \
+                                        I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT  19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK   (0xFULL << \
+                                        I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_nop_desc {
+       __le64 rsvd;
+       __le64 dtype_cmd;
+};
+
+#define I40E_TXD_NOP_QW1_DTYPE_SHIFT   0
+#define I40E_TXD_NOP_QW1_DTYPE_MASK    (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_NOP_QW1_CMD_SHIFT     4
+#define I40E_TXD_NOP_QW1_CMD_MASK      (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT)
+
+enum i40e_tx_nop_desc_cmd_bits {
+       /* Note: These are predefined bit offsets */
+       I40E_TX_NOP_DESC_EOP_SHIFT      = 0,
+       I40E_TX_NOP_DESC_RS_SHIFT       = 1,
+       I40E_TX_NOP_DESC_RSV_SHIFT      = 2 /* 5 bits */
+};
+
+struct i40e_filter_program_desc {
+       __le32 qindex_flex_ptype_vsi;
+       __le32 rsvd;
+       __le32 dtype_cmd_cntindex;
+       __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK  (0x7FFUL << \
+                                        I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT        11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+                                        I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK  (0x3FUL << \
+                                        I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+       /* Note: Values 0-30 are reserved for future use */
+       I40E_FILTER_PCTYPE_NONF_IPV4_UDP                = 31,
+       /* Note: Value 32 is reserved for future use */
+       I40E_FILTER_PCTYPE_NONF_IPV4_TCP                = 33,
+       I40E_FILTER_PCTYPE_NONF_IPV4_SCTP               = 34,
+       I40E_FILTER_PCTYPE_NONF_IPV4_OTHER              = 35,
+       I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
+       /* Note: Values 37-40 are reserved for future use */
+       I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
+       I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
+       I40E_FILTER_PCTYPE_NONF_IPV6_SCTP               = 44,
+       I40E_FILTER_PCTYPE_NONF_IPV6_OTHER              = 45,
+       I40E_FILTER_PCTYPE_FRAG_IPV6                    = 46,
+       /* Note: Value 47 is reserved for future use */
+       I40E_FILTER_PCTYPE_FCOE_OX                      = 48,
+       I40E_FILTER_PCTYPE_FCOE_RX                      = 49,
+       I40E_FILTER_PCTYPE_FCOE_OTHER                   = 50,
+       /* Note: Values 51-62 are reserved for future use */
+       I40E_FILTER_PCTYPE_L2_PAYLOAD                   = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+       I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET               = 0x0,
+       I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX      = 0x1,
+       I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER       = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+       I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE                 = 0x0,
+       I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID                = 0x1,
+       I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES    = 0x2,
+       I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES          = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT       23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK        (0x1FFUL << \
+                                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT  0
+#define I40E_TXD_FLTR_QW1_DTYPE_MASK   (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT    4
+#define I40E_TXD_FLTR_QW1_CMD_MASK     (0xFFFFULL << \
+                                        I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT   (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK    (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+       I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE        = 0x1,
+       I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE            = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT   (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK    (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT        (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT      (0x9ULL + \
+                                                I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+                                         I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK        (0x1FFUL << \
+                                        I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
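+
+/* Editorial note: a hypothetical sketch, not part of the original
+ * submission, of how dtype_cmd_cntindex is composed for an "add filter,
+ * direct to queue" programming descriptor using the fields above.
+ */
+static INLINE __le32 i40e_example_fd_prog_cmd(u32 cnt_index)
+{
+       u32 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+       dtype_cmd |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+                    I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+       dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+                    I40E_TXD_FLTR_QW1_DEST_SHIFT;
+       dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+       dtype_cmd |= (cnt_index << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                    I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+       return cpu_to_le32(dtype_cmd);
+}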
+
+enum i40e_filter_type {
+       I40E_FLOW_DIRECTOR_FLTR = 0,
+       I40E_PE_QUAD_HASH_FLTR = 1,
+       I40E_ETHERTYPE_FLTR,
+       I40E_FCOE_CTX_FLTR,
+       I40E_MAC_VLAN_FLTR,
+       I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+       u16 seid;
+       u16 uplink_seid;
+       u16 vsi_number;
+       u16 vsis_allocated;
+       u16 vsis_unallocated;
+       u16 flags;
+       u8 pf_num;
+       u8 vf_num;
+       u8 connection_type;
+       struct i40e_aqc_vsi_properties_data info;
+};
+
+struct i40e_veb_context {
+       u16 seid;
+       u16 uplink_seid;
+       u16 veb_number;
+       u16 vebs_allocated;
+       u16 vebs_unallocated;
+       u16 flags;
+       struct i40e_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+       u64 rx_bytes;                   /* gorc */
+       u64 rx_unicast;                 /* uprc */
+       u64 rx_multicast;               /* mprc */
+       u64 rx_broadcast;               /* bprc */
+       u64 rx_discards;                /* rdpc */
+       u64 rx_unknown_protocol;        /* rupp */
+       u64 tx_bytes;                   /* gotc */
+       u64 tx_unicast;                 /* uptc */
+       u64 tx_multicast;               /* mptc */
+       u64 tx_broadcast;               /* bptc */
+       u64 tx_discards;                /* tdpc */
+       u64 tx_errors;                  /* tepc */
+};
+
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+       u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+       u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+       u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+       u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
+#ifdef I40E_FCOE
+/* Statistics collected per function for FCoE */
+struct i40e_fcoe_stats {
+       u64 rx_fcoe_packets;            /* fcoeprc */
+       u64 rx_fcoe_dwords;             /* fcoedwrc */
+       u64 rx_fcoe_dropped;            /* fcoerpdc */
+       u64 tx_fcoe_packets;            /* fcoeptc */
+       u64 tx_fcoe_dwords;             /* fcoedwtc */
+       u64 fcoe_bad_fccrc;             /* fcoecrc */
+       u64 fcoe_last_error;            /* fcoelast */
+       u64 fcoe_ddp_count;             /* fcoeddpc */
+};
+
+/* offset to per function FCoE statistics block */
+#define I40E_FCOE_VF_STAT_OFFSET       0
+#define I40E_FCOE_PF_STAT_OFFSET       128
+#define I40E_FCOE_STAT_MAX             (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
+
+#endif
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+       /* eth stats collected by the port */
+       struct i40e_eth_stats eth;
+
+       /* additional port specific stats */
+       u64 tx_dropped_link_down;       /* tdold */
+       u64 crc_errors;                 /* crcerrs */
+       u64 illegal_bytes;              /* illerrc */
+       u64 error_bytes;                /* errbc */
+       u64 mac_local_faults;           /* mlfc */
+       u64 mac_remote_faults;          /* mrfc */
+       u64 rx_length_errors;           /* rlec */
+       u64 link_xon_rx;                /* lxonrxc */
+       u64 link_xoff_rx;               /* lxoffrxc */
+       u64 priority_xon_rx[8];         /* pxonrxc[8] */
+       u64 priority_xoff_rx[8];        /* pxoffrxc[8] */
+       u64 link_xon_tx;                /* lxontxc */
+       u64 link_xoff_tx;               /* lxofftxc */
+       u64 priority_xon_tx[8];         /* pxontxc[8] */
+       u64 priority_xoff_tx[8];        /* pxofftxc[8] */
+       u64 priority_xon_2_xoff[8];     /* pxon2offc[8] */
+       u64 rx_size_64;                 /* prc64 */
+       u64 rx_size_127;                /* prc127 */
+       u64 rx_size_255;                /* prc255 */
+       u64 rx_size_511;                /* prc511 */
+       u64 rx_size_1023;               /* prc1023 */
+       u64 rx_size_1522;               /* prc1522 */
+       u64 rx_size_big;                /* prc9522 */
+       u64 rx_undersize;               /* ruc */
+       u64 rx_fragments;               /* rfc */
+       u64 rx_oversize;                /* roc */
+       u64 rx_jabber;                  /* rjc */
+       u64 tx_size_64;                 /* ptc64 */
+       u64 tx_size_127;                /* ptc127 */
+       u64 tx_size_255;                /* ptc255 */
+       u64 tx_size_511;                /* ptc511 */
+       u64 tx_size_1023;               /* ptc1023 */
+       u64 tx_size_1522;               /* ptc1522 */
+       u64 tx_size_big;                /* ptc9522 */
+       u64 mac_short_packet_dropped;   /* mspdc */
+       u64 checksum_error;             /* xec */
+       /* flow director stats */
+       u64 fd_atr_match;
+       u64 fd_sb_match;
+       u64 fd_atr_tunnel_match;
+       u32 fd_atr_status;
+       u32 fd_sb_status;
+       /* EEE LPI */
+       u32 tx_lpi_status;
+       u32 rx_lpi_status;
+       u64 tx_lpi_count;               /* etlpic */
+       u64 rx_lpi_count;               /* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD               0x00
+#define I40E_SR_PCIE_ANALOG_CONFIG_PTR         0x03
+#define I40E_SR_PHY_ANALOG_CONFIG_PTR          0x04
+#define I40E_SR_OPTION_ROM_PTR                 0x05
+#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR     0x06
+#define I40E_SR_AUTO_GENERATED_POINTERS_PTR    0x07
+#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR                0x08
+#define I40E_SR_EMP_GLOBAL_MODULE_PTR          0x09
+#define I40E_SR_RO_PCIE_LCB_PTR                        0x0A
+#define I40E_SR_EMP_IMAGE_PTR                  0x0B
+#define I40E_SR_PE_IMAGE_PTR                   0x0C
+#define I40E_SR_CSR_PROTECTED_LIST_PTR         0x0D
+#define I40E_SR_MNG_CONFIG_PTR                 0x0E
+#define I40E_SR_EMP_MODULE_PTR                 0x0F
+#define I40E_SR_PBA_FLAGS                      0x15
+#define I40E_SR_PBA_BLOCK_PTR                  0x16
+#define I40E_SR_BOOT_CONFIG_PTR                        0x17
+#define I40E_NVM_OEM_VER_OFF                   0x83
+#define I40E_SR_NVM_DEV_STARTER_VERSION                0x18
+#define I40E_SR_NVM_WAKE_ON_LAN                        0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
+#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR  0x28
+#define I40E_SR_NVM_MAP_VERSION                        0x29
+#define I40E_SR_NVM_IMAGE_VERSION              0x2A
+#define I40E_SR_NVM_STRUCTURE_VERSION          0x2B
+#define I40E_SR_NVM_EETRACK_LO                 0x2D
+#define I40E_SR_NVM_EETRACK_HI                 0x2E
+#define I40E_SR_VPD_PTR                                0x2F
+#define I40E_SR_PXE_SETUP_PTR                  0x30
+#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR    0x31
+#define I40E_SR_NVM_ORIGINAL_EETRACK_LO                0x34
+#define I40E_SR_NVM_ORIGINAL_EETRACK_HI                0x35
+#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR    0x37
+#define I40E_SR_POR_REGS_AUTO_LOAD_PTR         0x38
+#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR                0x3A
+#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR       0x3B
+#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR       0x3C
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR         0x3E
+#define I40E_SR_SW_CHECKSUM_WORD               0x3F
+#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR    0x40
+#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR    0x42
+#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR    0x44
+#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR    0x46
+#define I40E_SR_EMP_SR_SETTINGS_PTR            0x48
+#define I40E_SR_FEATURE_CONFIGURATION_PTR      0x49
+#define I40E_SR_CONFIGURATION_METADATA_PTR     0x4D
+#define I40E_SR_IMMEDIATE_VALUES_PTR           0x4E
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE            1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE       1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT           0x06
+#define I40E_SR_CONTROL_WORD_1_MASK    (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS   0x800
+#define I40E_SR_BUF_ALIGNMENT          4096
+#define I40E_SR_WORDS_IN_1KB           512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE       0xBABA
+
+#define I40E_SRRD_SRCTL_ATTEMPTS       100000
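+
+/* Editorial note: a minimal sketch, not in the original change, of the
+ * checksum rule stated above: the stored word is chosen so that the
+ * 16-bit sum of every Shadow RAM word, checksum included, is 0xBABA.
+ */
+static INLINE u16 i40e_example_sr_checksum(u16 sum_of_other_words)
+{
+       return (u16)(I40E_SR_SW_CHECKSUM_BASE - sum_of_other_words);
+}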
+
+#ifdef I40E_FCOE
+/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
+
+enum i40e_fcoe_tx_ctx_desc_cmd_bits {
+       I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND        = 0x00, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2      = 0x01, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3      = 0x05, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2     = 0x02, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3     = 0x06, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2      = 0x03, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3      = 0x07, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL       = 0x08, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL       = 0x09, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_RELOFF                    = 0x10,
+       I40E_FCOE_TX_CTX_DESC_CLRSEQ                    = 0x20,
+       I40E_FCOE_TX_CTX_DESC_DIFENA                    = 0x40,
+       I40E_FCOE_TX_CTX_DESC_IL2TAG2                   = 0x80
+};
+
+/* FCoE DIF/DIX Context descriptor */
+struct i40e_fcoe_difdix_context_desc {
+       __le64 flags_buff0_buff1_ref;
+       __le64 difapp_msk_bias;
+};
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT   0
+#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_MASK    (0xFFFULL << \
+                                       I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT)
+
+enum i40e_fcoe_difdix_ctx_desc_flags_bits {
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_RSVD                          = 0x0000,
+       /* 1 BIT  */
+       I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGCHK                = 0x0000,
+       /* 1 BIT  */
+       I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGNOTCHK             = 0x0004,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_OPAQUE                  = 0x0000,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY            = 0x0008,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPTAG     = 0x0010,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPREFTAG  = 0x0018,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_CNST                  = 0x0000,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_INC1BLK               = 0x0020,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_APPTAG                = 0x0040,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_RSVD                  = 0x0060,
+       /* 1 BIT  */
+       I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_XSUM                  = 0x0000,
+       /* 1 BIT  */
+       I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_CRC                   = 0x0080,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_UNTAG                 = 0x0000,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_BUF                   = 0x0100,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_RSVD                  = 0x0200,
+       /* 2 BITS */
+       I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_EMBDTAGS              = 0x0300,
+       /* 1 BIT  */
+       I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_UNTAG                  = 0x0000,
+       /* 1 BIT  */
+       I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_TAG                    = 0x0400,
+       /* 1 BIT */
+       I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_512B                   = 0x0000,
+       /* 1 BIT */
+       I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_4K                     = 0x0800
+};
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT   12
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_MASK    (0x3FFULL << \
+                                       I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT   22
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_MASK    (0x3FFULL << \
+                                       I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT     32
+#define I40E_FCOE_DIFDIX_CTX_QW0_REF_MASK      (0xFFFFFFFFULL << \
+                                       I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT     0
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MASK      (0xFFFFULL << \
+                                       I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT 16
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_MASK  (0xFFFFULL << \
+                                       I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT        32
+#define I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_MASK (0xFFFFFFFFULL << \
+                                       I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT)
+
+/* FCoE DIF/DIX Buffers descriptor */
+struct i40e_fcoe_difdix_buffers_desc {
+       __le64 buff_addr0;
+       __le64 buff_addr1;
+};
+
+/* FCoE DDP Context descriptor */
+struct i40e_fcoe_ddp_context_desc {
+       __le64 rsvd;
+       __le64 type_cmd_foff_lsize;
+};
+
+#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT      0
+#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK       (0xFULL << \
+                                       I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT        4
+#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
+                                        I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
+
+enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
+       I40E_FCOE_DDP_CTX_DESC_BSIZE_512B       = 0x00, /* 2 BITS */
+       I40E_FCOE_DDP_CTX_DESC_BSIZE_4K         = 0x01, /* 2 BITS */
+       I40E_FCOE_DDP_CTX_DESC_BSIZE_8K         = 0x02, /* 2 BITS */
+       I40E_FCOE_DDP_CTX_DESC_BSIZE_16K        = 0x03, /* 2 BITS */
+       I40E_FCOE_DDP_CTX_DESC_DIFENA           = 0x04, /* 1 BIT  */
+       I40E_FCOE_DDP_CTX_DESC_LASTSEQH         = 0x08, /* 1 BIT  */
+};
+
+#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT       16
+#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK        (0x3FFFULL << \
+                                        I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
+
+#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT      32
+#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK       (0x3FFFULL << \
+                                       I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
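+
+/* Editorial note: a hypothetical sketch, not part of the original
+ * submission, of a DDP context descriptor qword built from the
+ * DTYPE/CMD/FOFF/LSIZE fields above (the 4K buffer size is an
+ * arbitrary example value).
+ */
+static INLINE __le64 i40e_example_build_ddp_ctx(u16 foff, u32 lsize)
+{
+       u64 qw1 = I40E_TX_DESC_DTYPE_DDP_CTX;
+
+       qw1 |= (u64)I40E_FCOE_DDP_CTX_DESC_BSIZE_4K <<
+              I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT;
+       qw1 |= ((u64)foff << I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) &
+              I40E_FCOE_DDP_CTX_QW1_FOFF_MASK;
+       qw1 |= ((u64)lsize << I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT) &
+              I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK;
+       return cpu_to_le64(qw1);
+}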
+
+/* FCoE DDP/DWO Queue Context descriptor */
+struct i40e_fcoe_queue_context_desc {
+       __le64 dmaindx_fbase;           /* 0:11 DMAINDX, 12:63 FBASE */
+       __le64 flen_tph;                /* 0:12 FLEN, 13:15 TPH */
+};
+
+#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT  0
+#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK   (0xFFFULL << \
+                                       I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT    12
+#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK     (0xFFFFFFFFFFFFFULL << \
+                                       I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT     0
+#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK      (0x1FFFULL << \
+                                       I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT      13
+#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK       (0x7ULL << \
+                                       I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT)
+
+enum i40e_fcoe_queue_ctx_desc_tph_bits {
+       I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC       = 0x1,
+       I40E_FCOE_QUEUE_CTX_DESC_TPHDATA        = 0x2
+};
+
+#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT   30
+#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK    (0x3ULL << \
+                                       I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
+
+/* FCoE DDP/DWO Filter Context descriptor */
+struct i40e_fcoe_filter_context_desc {
+       __le32 param;
+       __le16 seqn;
+
+       /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
+       __le16 rsvd_dmaindx;
+
+       /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
+       __le64 flags_rsvd_lanq;
+};
+
+#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
+#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK  (0xFFF << \
+                                       I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
+
+enum i40e_fcoe_filter_ctx_desc_flags_bits {
+       I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP      = 0x00,
+       I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO      = 0x01,
+       I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT    = 0x00,
+       I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP     = 0x02,
+       I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2     = 0x00,
+       I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3     = 0x04
+};
+
+#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT   0
+#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK    (0xFFULL << \
+                                       I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
+
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT     8
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK      (0x3FULL << \
+                       I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
+
+#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT     53
+#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK      (0x7FFULL << \
+                       I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
+
+#endif /* I40E_FCOE */
+enum i40e_switch_element_types {
+       I40E_SWITCH_ELEMENT_TYPE_MAC    = 1,
+       I40E_SWITCH_ELEMENT_TYPE_PF     = 2,
+       I40E_SWITCH_ELEMENT_TYPE_VF     = 3,
+       I40E_SWITCH_ELEMENT_TYPE_EMP    = 4,
+       I40E_SWITCH_ELEMENT_TYPE_BMC    = 6,
+       I40E_SWITCH_ELEMENT_TYPE_PE     = 16,
+       I40E_SWITCH_ELEMENT_TYPE_VEB    = 17,
+       I40E_SWITCH_ELEMENT_TYPE_PA     = 18,
+       I40E_SWITCH_ELEMENT_TYPE_VSI    = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+       I40E_ETHER_TYPE_1588            = 0,
+       I40E_ETHER_TYPE_FIP             = 1,
+       I40E_ETHER_TYPE_OUI_EXTENDED    = 2,
+       I40E_ETHER_TYPE_MAC_CONTROL     = 3,
+       I40E_ETHER_TYPE_LLDP            = 4,
+       I40E_ETHER_TYPE_EVB_PROTOCOL1   = 5,
+       I40E_ETHER_TYPE_EVB_PROTOCOL2   = 6,
+       I40E_ETHER_TYPE_QCN_CNM         = 7,
+       I40E_ETHER_TYPE_8021X           = 8,
+       I40E_ETHER_TYPE_ARP             = 9,
+       I40E_ETHER_TYPE_RSV1            = 10,
+       I40E_ETHER_TYPE_RSV2            = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE     1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+       I40E_HASH_FILTER_SIZE_1K        = 0,
+       I40E_HASH_FILTER_SIZE_2K        = 1,
+       I40E_HASH_FILTER_SIZE_4K        = 2,
+       I40E_HASH_FILTER_SIZE_8K        = 3,
+       I40E_HASH_FILTER_SIZE_16K       = 4,
+       I40E_HASH_FILTER_SIZE_32K       = 5,
+       I40E_HASH_FILTER_SIZE_64K       = 6,
+       I40E_HASH_FILTER_SIZE_128K      = 7,
+       I40E_HASH_FILTER_SIZE_256K      = 8,
+       I40E_HASH_FILTER_SIZE_512K      = 9,
+       I40E_HASH_FILTER_SIZE_1M        = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE                512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+       I40E_DMA_CNTX_SIZE_512          = 0,
+       I40E_DMA_CNTX_SIZE_1K           = 1,
+       I40E_DMA_CNTX_SIZE_2K           = 2,
+       I40E_DMA_CNTX_SIZE_4K           = 3,
+       I40E_DMA_CNTX_SIZE_8K           = 4,
+       I40E_DMA_CNTX_SIZE_16K          = 5,
+       I40E_DMA_CNTX_SIZE_32K          = 6,
+       I40E_DMA_CNTX_SIZE_64K          = 7,
+       I40E_DMA_CNTX_SIZE_128K         = 8,
+       I40E_DMA_CNTX_SIZE_256K         = 9,
+};
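+
+/* Editorial note: hypothetical helpers, not in the original change. The
+ * two enums above encode a power-of-two multiplier over the stated base
+ * sizes, i.e. count = base << enum_value.
+ */
+static INLINE u32 i40e_example_hash_filter_count(enum i40e_hash_filter_size sz)
+{
+       return I40E_HASH_FILTER_BASE_SIZE << (u32)sz;
+}
+
+static INLINE u32 i40e_example_dma_cntx_size(enum i40e_dma_cntx_size sz)
+{
+       return I40E_DMA_CNTX_BASE_SIZE << (u32)sz;
+}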
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+       I40E_HASH_LUT_SIZE_128          = 0,
+       I40E_HASH_LUT_SIZE_512          = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+       /* number of PE Quad Hash filter buckets */
+       enum i40e_hash_filter_size pe_filt_num;
+       /* number of PE Quad Hash contexts */
+       enum i40e_dma_cntx_size pe_cntx_num;
+       /* number of FCoE filter buckets */
+       enum i40e_hash_filter_size fcoe_filt_num;
+       /* number of FCoE DDP contexts */
+       enum i40e_dma_cntx_size fcoe_cntx_num;
+       /* size of the Hash LUT */
+       enum i40e_hash_lut_size hash_lut_size;
+       /* enable FDIR filters for PF and its VFs */
+       bool enable_fdir;
+       /* enable Ethertype filters for PF and its VFs */
+       bool enable_ethtype;
+       /* enable MAC/VLAN filters for PF and its VFs */
+       bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+       u16 mac_etype_used;   /* Used perfect match MAC/EtherType filters */
+       u16 etype_used;       /* Used perfect EtherType filters */
+       u16 mac_etype_free;   /* Un-used perfect match MAC/EtherType filters */
+       u16 etype_free;       /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+       I40E_RESET_POR          = 0,
+       I40E_RESET_CORER        = 1,
+       I40E_RESET_GLOBR        = 2,
+       I40E_RESET_EMPR         = 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR          0xD
+struct i40e_lldp_variables {
+       u16 length;
+       u16 adminstatus;
+       u16 msgfasttx;
+       u16 msgtxinterval;
+       u16 txparams;
+       u16 timers;
+       u16 crc8;
+};
+
+/* Offsets into Alternate Ram */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET                0   /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF          64   /* in dwords */
+#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET  0xD  /* in dwords */
+#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET   0xC  /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET          0xE  /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET          0xF  /* in dwords */
+
+/* Alternate Ram Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK         0xFF
+#define I40E_ALT_BW_RELATIVE_MASK      0x40000000
+#define I40E_ALT_BW_VALID_MASK         0x80000000
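+
+/* Editorial note: a hedged sketch, not part of the original submission,
+ * of how an Alternate RAM bandwidth dword decomposes under the masks
+ * above; the function name is hypothetical.
+ */
+static INLINE void i40e_example_decode_alt_bw(u32 bw_dword, u8 *value,
+                                             bool *relative, bool *valid)
+{
+       *value = bw_dword & I40E_ALT_BW_VALUE_MASK;
+       *relative = !!(bw_dword & I40E_ALT_BW_RELATIVE_MASK);
+       *valid = !!(bw_dword & I40E_ALT_BW_VALID_MASK);
+}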
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512        0x00010000
+#endif /* _I40E_TYPE_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl.h
new file mode 100644 (file)
index 0000000..ef78a91
--- /dev/null
@@ -0,0 +1,371 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI.  Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is of
+ * i40e_status_code type, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the I40E_VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ */
+       I40E_VIRTCHNL_OP_UNKNOWN = 0,
+       I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+       I40E_VIRTCHNL_OP_RESET_VF = 2,
+       I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+       I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+       I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+       I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+       I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+       I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
+       I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
+       I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
+       I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
+       I40E_VIRTCHNL_OP_ADD_VLAN = 12,
+       I40E_VIRTCHNL_OP_DEL_VLAN = 13,
+       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+       I40E_VIRTCHNL_OP_GET_STATS = 15,
+       I40E_VIRTCHNL_OP_FCOE = 16,
+       I40E_VIRTCHNL_OP_EVENT = 17,
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+       u8 pad[8];                       /* AQ flags/opcode/len/retval fields */
+       enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+       i40e_status v_retval;  /* ditto for desc->retval */
+       u32 vfid;                        /* used by PF when sending to VF */
+};
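
    Since this struct overlays the 32-byte admin queue descriptor
    (v_opcode and v_retval land in the cookie fields the firmware copies
    verbatim), a compile-time guard is a cheap sanity check. A sketch,
    assuming BUILD_BUG_ON() from <linux/bug.h> and the usual 4-byte enum
    ABI; it is not part of this header:

    /* Sketch: the overlay is only valid while the message descriptor
     * fits inside the 32-byte admin queue descriptor.
     */
    static inline void i40e_virtchnl_msg_size_check(void)
    {
    	BUILD_BUG_ON(sizeof(struct i40e_virtchnl_msg) > 32);
    }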
+
+/* Message descriptions and data structures. */
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * The I40E_VIRTCHNL_OP_VERSION opcode MUST always be specified as == 1,
+ * regardless of other changes in the API. The PF must always respond to
+ * this message without error, regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR            1
+#define I40E_VIRTCHNL_VERSION_MINOR            1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct i40e_virtchnl_version_info {
+       u32 major;
+       u32 minor;
+};
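
    The rules above distinguish a fatal major mismatch from a tolerable
    minor one, and MINOR_NO_VF_CAPS identifies a 1.0 peer that cannot
    negotiate capabilities. A sketch of how a VF might apply them, using
    the standard kernel pr_warn() for the log entry:

    /* Sketch: interpret the PF's version reply per the rules above. */
    static int vf_check_api_version(const struct i40e_virtchnl_version_info *pf_ver)
    {
    	if (pf_ver->major != I40E_VIRTCHNL_VERSION_MAJOR)
    		return -1;	/* major mismatch: VF cannot operate */

    	if (pf_ver->minor != I40E_VIRTCHNL_VERSION_MINOR)
    		pr_warn("virtchnl minor version mismatch (PF %u, VF %u)\n",
    			pf_ver->minor, I40E_VIRTCHNL_VERSION_MINOR);

    	return 0;	/* usable, possibly without VF capability flags */
    }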
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters.
+ * PF does NOT respond! The VF driver must delay, then poll the VFGEN_RSTAT
+ * register until reset completion is indicated. The admin queue must be
+ * reinitialized after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters.
+ * Version 1.1 VF sends this request to PF with a u32 bitmap of its
+ * capabilities.
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+       u16 vsi_id;
+       u16 num_queue_pairs;
+       enum i40e_vsi_type vsi_type;
+       u16 qset_handle;
+       u8 default_mac_addr[ETH_ALEN];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2            0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP         0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE          0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ                0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG       0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR     0x00000020
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING    0x00020000
+
+struct i40e_virtchnl_vf_resource {
+       u16 num_vsis;
+       u16 num_queue_pairs;
+       u16 max_vectors;
+       u16 max_mtu;
+
+       u32 vf_offload_flags;
+       u32 max_fcoe_contexts;
+       u32 max_fcoe_filters;
+
+       struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
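
    vsi_res[1] is the pre-C99 idiom for a variable-length tail: the PF
    appends num_vsis entries. A sizing sketch that mirrors the
    allocation the PF side performs in i40e_vc_get_vf_resources_msg()
    later in this patch; entries are then walked as
    vfres->vsi_res[0..num_vsis-1]:

    /* Sketch: size a GET_VF_RESOURCES reply for num_vsis VSIs, matching
     * the PF-side allocation in i40e_vc_get_vf_resources_msg().
     */
    static inline size_t vfres_buf_len(u16 num_vsis)
    {
    	return sizeof(struct i40e_virtchnl_vf_resource) +
    	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis;
    }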
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+       u16 vsi_id;
+       u16 queue_id;
+       u16 ring_len;           /* number of descriptors, multiple of 8 */
+       u16 headwb_enabled;
+       u64 dma_ring_addr;
+       u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+       u16 vsi_id;
+       u16 queue_id;
+       u32 ring_len;           /* number of descriptors, multiple of 32 */
+       u16 hdr_size;
+       u16 splithdr_enabled;
+       u32 databuffer_size;
+       u32 max_pkt_size;
+       u64 dma_ring_addr;
+       enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+       /* NOTE: vsi_id and queue_id should be identical for both queues. */
+       struct i40e_virtchnl_txq_info txq;
+       struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+       u16 vsi_id;
+       u16 num_queue_pairs;
+       struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+       u16 vsi_id;
+       u16 vector_id;
+       u16 rxq_map;
+       u16 txq_map;
+       u16 rxitr_idx;
+       u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+       u16 num_vectors;
+       struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the fields
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+       u16 vsi_id;
+       u16 pad;
+       u32 rx_queues;
+       u32 tx_queues;
+};
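
    Since rx_queues and tx_queues are bitmaps, selecting queues is plain
    bit arithmetic. A sketch reusing the hypothetical vf_send_msg()
    helper introduced above and the kernel's BIT() macro:

    /* Sketch: enable Rx/Tx queue pairs 0 and 2 of an example VSI. */
    static int vf_enable_two_queues(u16 vsi_id)
    {
    	struct i40e_virtchnl_queue_select vqs = {
    		.vsi_id    = vsi_id,
    		.rx_queues = BIT(0) | BIT(2),
    		.tx_queues = BIT(0) | BIT(2),
    	};

    	return vf_send_msg(I40E_VIRTCHNL_OP_ENABLE_QUEUES,
    			   &vqs, sizeof(vqs));
    }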
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+       u8 addr[ETH_ALEN];
+       u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+       u16 vsi_id;
+       u16 num_elements;
+       struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+       u16 vsi_id;
+       u16 num_elements;
+       u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+       u16 vsi_id;
+       u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC   0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+       I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+       I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+       I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+       I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO            0
+#define I40E_PF_EVENT_SEVERITY_ATTENTION       1
+#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM    255
+
+struct i40e_virtchnl_pf_event {
+       enum i40e_virtchnl_event_codes event;
+       union {
+               struct {
+                       enum i40e_aq_link_speed link_speed;
+                       bool link_status;
+               } link_event;
+       } event_data;
+
+       int severity;
+};
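
    On the VF side, handling an OP_EVENT is a switch over the event
    code; only LINK_CHANGE carries a payload in event_data. A sketch
    with hypothetical handlers (vf_link_changed() and
    vf_prepare_for_reset() are assumptions, not part of this header):

    /* Sketch: dispatch a received I40E_VIRTCHNL_OP_EVENT. */
    static void vf_handle_pf_event(const struct i40e_virtchnl_pf_event *pfe)
    {
    	switch (pfe->event) {
    	case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
    		vf_link_changed(pfe->event_data.link_event.link_status,
    				pfe->event_data.link_event.link_speed);
    		break;
    	case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
    		vf_prepare_for_reset();
    		break;
    	default:
    		break;
    	}
    }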
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or the VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+       I40E_VFR_INPROGRESS = 0,
+       I40E_VFR_COMPLETED,
+       I40E_VFR_VFACTIVE,
+       I40E_VFR_UNKNOWN,
+};
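
    Because a read during reset returns DEADBEEF, masking the register
    down to the two state bits yields I40E_VFR_UNKNOWN (3), so a poll
    loop must treat that value as "still resetting". A VF-side sketch;
    I40E_VFGEN_RSTAT_VFR_STATE_MASK is assumed from the register
    definitions:

    /* Sketch: VF-side poll for reset completion. */
    static int vf_wait_for_reset_done(struct i40e_hw *hw)
    {
    	int i;

    	for (i = 0; i < 100; i++) {
    		u32 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
    			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

    		/* DEADBEEF masks to I40E_VFR_UNKNOWN: keep waiting */
    		if (rstat == I40E_VFR_COMPLETED ||
    		    rstat == I40E_VFR_VFACTIVE)
    			return 0;
    		usleep_range(10000, 20000);
    	}
    	return -1;	/* reset never signalled completion */
    }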
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.c
new file mode 100644 (file)
index 0000000..3af1ad9
--- /dev/null
@@ -0,0 +1,2512 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+
+/*********************notification routines***********************/
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+                                enum i40e_virtchnl_ops v_opcode,
+                                i40e_status v_retval, u8 *msg,
+                                u16 msglen)
+{
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_vf *vf = pf->vf;
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+               /* Not all VFs are enabled, so skip the ones that are not */
+               if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+                   !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+                       continue;
+
+               /* Ignore return value on purpose - a given VF may fail, but
+                * we need to keep going and send to all of them
+                */
+               i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
+                                      msg, msglen, NULL);
+       }
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ **/
+static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
+{
+       struct i40e_virtchnl_pf_event pfe;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_link_status *ls = &pf->hw.phy.link_info;
+       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+       pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+       pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+       if (vf->link_forced) {
+               pfe.event_data.link_event.link_status = vf->link_up;
+               pfe.event_data.link_event.link_speed =
+                       (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+       } else {
+#endif
+               pfe.event_data.link_event.link_status =
+                       ls->link_info & I40E_AQ_LINK_UP;
+               pfe.event_data.link_event.link_speed = ls->link_speed;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+       }
+#endif
+       i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+                              I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the PF structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vfs; i++)
+               i40e_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+       struct i40e_virtchnl_pf_event pfe;
+
+       pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+       pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+       i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+                            (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the VF structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+       struct i40e_virtchnl_pf_event pfe;
+       int abs_vf_id;
+
+       /* validate the request */
+       if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+               return;
+
+       /* verify if the VF is in either init or active before proceeding */
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+           !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+               return;
+
+       abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+
+       pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+       pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+       i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+                              I40E_SUCCESS, (u8 *)&pfe,
+                              sizeof(struct i40e_virtchnl_pf_event), NULL);
+}
+/***********************misc routines*****************************/
+
+/**
+ * i40e_vc_disable_vf
+ * @pf: pointer to the PF info
+ * @vf: pointer to the VF info
+ *
+ * Disable the VF through a SW reset
+ **/
+static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
+{
+       i40e_vc_notify_vf_reset(vf);
+       i40e_reset_vf(vf, false);
+}
+
+/**
+ * i40e_vc_isvalid_vsi_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI id
+ *
+ * check for the valid VSI id
+ **/
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
+
+       return (vsi && (vsi->vf_id == vf->vf_id));
+}
+
+/**
+ * i40e_vc_isvalid_queue_id
+ * @vf: pointer to the VF info
+ * @vsi_id: vsi id
+ * @qid: vsi relative queue id
+ *
+ * check for the valid queue id
+ **/
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
+                                           u8 qid)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
+
+       return (vsi && (qid < vsi->alloc_queue_pairs));
+}
+
+/**
+ * i40e_vc_isvalid_vector_id
+ * @vf: pointer to the VF info
+ * @vector_id: VF relative vector id
+ *
+ * check for the valid vector id
+ **/
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+{
+       struct i40e_pf *pf = vf->pf;
+
+       return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+}
+
+/***********************vf resource mgmt routines*****************/
+
+/**
+ * i40e_vc_get_pf_queue_id
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
+ * @vsi_queue_id: vsi relative queue id
+ *
+ * return PF relative queue id
+ **/
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
+                                  u8 vsi_queue_id)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
+       u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
+
+       if (le16_to_cpu(vsi->info.mapping_flags) &
+           I40E_AQ_VSI_QUE_MAP_NONCONTIG)
+               pf_queue_id =
+                       le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
+       else
+               pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
+                             vsi_queue_id;
+
+       return pf_queue_id;
+}
+
+/**
+ * i40e_config_irq_link_list
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as given by the FW
+ * @vecmap: irq map info
+ *
+ * configure irq link list from the map
+ **/
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
+                                     struct i40e_virtchnl_vector_map *vecmap)
+{
+       unsigned long linklistmap = 0, tempmap;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       u16 vsi_queue_id, pf_queue_id;
+       enum i40e_queue_type qtype;
+       u16 next_q, vector_id;
+       u32 reg, reg_idx;
+       u16 itr_idx = 0;
+
+       vector_id = vecmap->vector_id;
+       /* setup the head */
+       if (0 == vector_id)
+               reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+       else
+               reg_idx = I40E_VPINT_LNKLSTN(
+                    ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
+                    (vector_id - 1));
+
+       if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
+               /* Special case - No queues mapped on this vector */
+               wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+               goto irq_list_done;
+       }
+       tempmap = vecmap->rxq_map;
+       for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
+               linklistmap |= (1 <<
+                               (I40E_VIRTCHNL_SUPPORTED_QTYPES *
+                                vsi_queue_id));
+       }
+
+       tempmap = vecmap->txq_map;
+       for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
+               linklistmap |= (1 <<
+                               (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+                                + 1));
+       }
+
+       next_q = find_first_bit(&linklistmap,
+                               (I40E_MAX_VSI_QP *
+                                I40E_VIRTCHNL_SUPPORTED_QTYPES));
+       vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+       qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+       reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
+
+       wr32(hw, reg_idx, reg);
+
+       while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+               switch (qtype) {
+               case I40E_QUEUE_TYPE_RX:
+                       reg_idx = I40E_QINT_RQCTL(pf_queue_id);
+                       itr_idx = vecmap->rxitr_idx;
+                       break;
+               case I40E_QUEUE_TYPE_TX:
+                       reg_idx = I40E_QINT_TQCTL(pf_queue_id);
+                       itr_idx = vecmap->txitr_idx;
+                       break;
+               default:
+                       break;
+               }
+
+               next_q = find_next_bit(&linklistmap,
+                                      (I40E_MAX_VSI_QP *
+                                       I40E_VIRTCHNL_SUPPORTED_QTYPES),
+                                      next_q + 1);
+               if (next_q <
+                   (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+                       vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+                       qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+                       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
+                                                             vsi_queue_id);
+               } else {
+                       pf_queue_id = I40E_QUEUE_END_OF_LIST;
+                       qtype = 0;
+               }
+
+               /* the format for the RQCTL & TQCTL regs is the same */
+               reg = (vector_id) |
+                   (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+                   (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+                   BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+                   (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+               wr32(hw, reg_idx, reg);
+       }
+
+       /* If the VF is running in polling mode and using interrupt zero,
+        * we need to disable auto-masking of interrupt zero for that VF.
+        */
+       if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
+           (vector_id == 0)) {
+               reg = rd32(hw, I40E_GLINT_CTL);
+               if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
+                       reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+                       wr32(hw, I40E_GLINT_CTL, reg);
+               }
+       }
+
+irq_list_done:
+       i40e_flush(hw);
+}
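
    The builder above interleaves queue types in linklistmap: each VSI
    queue owns I40E_VIRTCHNL_SUPPORTED_QTYPES adjacent bits, Rx first,
    then Tx. A decoding sketch, assuming I40E_VIRTCHNL_SUPPORTED_QTYPES
    == 2 as in this driver; walking the set bits in order therefore
    alternates Rx/Tx per queue, which is exactly the traversal the
    while loop performs:

    /* Sketch: the bit layout consumed by i40e_config_irq_link_list(). */
    static inline unsigned int linklist_bit(u16 vsi_queue_id, bool is_tx)
    {
    	/* Rx of queue n -> bit 2n, Tx of queue n -> bit 2n + 1 */
    	return vsi_queue_id * I40E_VIRTCHNL_SUPPORTED_QTYPES +
    	       (is_tx ? 1 : 0);
    }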
+
+/**
+ * i40e_config_vsi_tx_queue
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure tx queue
+ **/
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
+                                   u16 vsi_queue_id,
+                                   struct i40e_virtchnl_txq_info *info)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_hmc_obj_txq tx_ctx;
+       struct i40e_vsi *vsi;
+       u16 pf_queue_id;
+       u32 qtx_ctl;
+       int ret = 0;
+
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+       vsi = i40e_find_vsi_from_id(pf, vsi_id);
+
+       /* clear the context structure first */
+       memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
+
+       /* only set the required fields */
+       tx_ctx.base = info->dma_ring_addr / 128;
+       tx_ctx.qlen = info->ring_len;
+       tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
+       tx_ctx.rdylist_act = 0;
+       tx_ctx.head_wb_ena = info->headwb_enabled;
+       tx_ctx.head_wb_addr = info->dma_headwb_addr;
+
+       /* clear the context in the HMC */
+       ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
+       if (ret) {
+               dev_err(&pf->pdev->dev,
+                       "Failed to clear VF LAN Tx queue context %d, error: %d\n",
+                       pf_queue_id, ret);
+               ret = -ENOENT;
+               goto error_context;
+       }
+
+       /* set the context in the HMC */
+       ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
+       if (ret) {
+               dev_err(&pf->pdev->dev,
+                       "Failed to set VF LAN Tx queue context %d error: %d\n",
+                       pf_queue_id, ret);
+               ret = -ENOENT;
+               goto error_context;
+       }
+
+       /* associate this queue with the PCI VF function */
+       qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
+       qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+                   & I40E_QTX_CTL_PF_INDX_MASK);
+       qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
+                    << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+                   & I40E_QTX_CTL_VFVM_INDX_MASK);
+       wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
+       i40e_flush(hw);
+
+error_context:
+       return ret;
+}
+
+/**
+ * i40e_config_vsi_rx_queue
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure rx queue
+ **/
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
+                                   u16 vsi_queue_id,
+                                   struct i40e_virtchnl_rxq_info *info)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_hmc_obj_rxq rx_ctx;
+       u16 pf_queue_id;
+       int ret = 0;
+
+       pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+
+       /* clear the context structure first */
+       memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+
+       /* only set the required fields */
+       rx_ctx.base = info->dma_ring_addr / 128;
+       rx_ctx.qlen = info->ring_len;
+
+       if (info->splithdr_enabled) {
+               rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
+                                 I40E_RX_SPLIT_IP      |
+                                 I40E_RX_SPLIT_TCP_UDP |
+                                 I40E_RX_SPLIT_SCTP;
+               /* header length validation */
+               if (info->hdr_size > ((2 * 1024) - 64)) {
+                       ret = -EINVAL;
+                       goto error_param;
+               }
+               rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+               /* set splitalways mode 10b */
+               rx_ctx.dtype = 0x2;
+       }
+
+       /* databuffer length validation */
+       if (info->databuffer_size > ((16 * 1024) - 128)) {
+               ret = -EINVAL;
+               goto error_param;
+       }
+       rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+
+       /* max pkt. length validation */
+       if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
+               ret = -EINVAL;
+               goto error_param;
+       }
+       rx_ctx.rxmax = info->max_pkt_size;
+
+       /* enable 32bytes desc always */
+       rx_ctx.dsize = 1;
+
+       /* default values */
+       rx_ctx.lrxqthresh = 2;
+       rx_ctx.crcstrip = 1;
+       rx_ctx.prefena = 1;
+       rx_ctx.l2tsel = 1;
+
+       /* clear the context in the HMC */
+       ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
+       if (ret) {
+               dev_err(&pf->pdev->dev,
+                       "Failed to clear VF LAN Rx queue context %d, error: %d\n",
+                       pf_queue_id, ret);
+               ret = -ENOENT;
+               goto error_param;
+       }
+
+       /* set the context in the HMC */
+       ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
+       if (ret) {
+               dev_err(&pf->pdev->dev,
+                       "Failed to set VF LAN Rx queue context %d error: %d\n",
+                       pf_queue_id, ret);
+               ret = -ENOENT;
+               goto error_param;
+       }
+
+error_param:
+       return ret;
+}
+
+/**
+ * i40e_alloc_vsi_res
+ * @vf: pointer to the VF info
+ * @type: type of VSI to allocate
+ *
+ * alloc VF vsi context & resources
+ **/
+static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
+{
+       struct i40e_mac_filter *f = NULL;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi;
+       int ret = 0;
+
+       vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
+
+       if (!vsi) {
+               dev_err(&pf->pdev->dev,
+                       "add vsi failed for VF %d, aq_err %d\n",
+                       vf->vf_id, pf->hw.aq.asq_last_status);
+               ret = -ENOENT;
+               goto error_alloc_vsi_res;
+       }
+       if (type == I40E_VSI_SRIOV) {
+               u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+               vf->lan_vsi_idx = vsi->idx;
+               vf->lan_vsi_id = vsi->id;
+               /* If a port VLAN was configured and the VF driver was
+                * then removed, the VSI port VLAN configuration was
+                * destroyed.  Check if there is a port VLAN and restore
+                * the VSI configuration if needed.
+                */
+               if (vf->port_vlan_id)
+                       i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
+
+               spin_lock_bh(&vsi->mac_filter_list_lock);
+               f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
+                                   true, false);
+               if (!f)
+                       dev_info(&pf->pdev->dev,
+                                "Could not allocate VF MAC addr\n");
+               f = i40e_add_filter(vsi, brdcast,
+                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
+                                   true, false);
+               if (!f)
+                       dev_info(&pf->pdev->dev,
+                                "Could not allocate VF broadcast filter\n");
+               spin_unlock_bh(&vsi->mac_filter_list_lock);
+       }
+
+       /* program mac filter */
+       ret = i40e_sync_vsi_filters(vsi, false);
+       if (ret)
+               dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+
+       /* Set VF bandwidth if specified */
+       if (vf->tx_rate) {
+               ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+                                                 vf->tx_rate / 50, 0, NULL);
+               if (ret)
+                       dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
+                               vf->vf_id, ret);
+       }
+
+error_alloc_vsi_res:
+       return ret;
+}
+
+/**
+ * i40e_enable_vf_mappings
+ * @vf: pointer to the VF info
+ *
+ * enable VF mappings
+ **/
+static void i40e_enable_vf_mappings(struct i40e_vf *vf)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg, total_queue_pairs = 0;
+       int j;
+
+       /* Tell the hardware we're using noncontiguous mapping. HW requires
+        * that VF queues be mapped using this method, even when they are
+        * contiguous in real life
+        */
+       wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
+            I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+       /* enable VF vplan_qtable mappings */
+       reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
+       wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
+
+       /* map PF queues to VF queues */
+       for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
+               u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
+
+               reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
+               wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
+               total_queue_pairs++;
+       }
+
+       /* map PF queues to VSI */
+       for (j = 0; j < 7; j++) {
+               if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
+                       reg = 0x07FF07FF;       /* unused */
+               } else {
+                       u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
+                                                         j * 2);
+                       reg = qid;
+                       qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
+                                                     (j * 2) + 1);
+                       reg |= qid << 16;
+               }
+               wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
+       }
+
+       i40e_flush(hw);
+}
+
+/**
+ * i40e_disable_vf_mappings
+ * @vf: pointer to the VF info
+ *
+ * disable VF mappings
+ **/
+static void i40e_disable_vf_mappings(struct i40e_vf *vf)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       int i;
+
+       /* disable qp mappings */
+       wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
+       for (i = 0; i < I40E_MAX_VSI_QP; i++)
+               wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
+                    I40E_QUEUE_END_OF_LIST);
+       i40e_flush(hw);
+}
+
+/**
+ * i40e_free_vf_res
+ * @vf: pointer to the VF info
+ *
+ * free VF resources
+ **/
+static void i40e_free_vf_res(struct i40e_vf *vf)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_idx, reg;
+       int i, msix_vf;
+
+       /* free vsi & disconnect it from the parent uplink */
+       if (vf->lan_vsi_idx) {
+               i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+               vf->lan_vsi_idx = 0;
+               vf->lan_vsi_id = 0;
+       }
+       msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
+       /* disable interrupts so the VF starts in a known state */
+       for (i = 0; i < msix_vf; i++) {
+               /* the format is the same for both registers */
+               if (0 == i)
+                       reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+               else
+                       reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+                                                     (vf->vf_id))
+                                                    + (i - 1));
+               wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+               i40e_flush(hw);
+       }
+
+       /* clear the irq settings */
+       for (i = 0; i < msix_vf; i++) {
+               /* the format is the same for both registers */
+               if (0 == i)
+                       reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+               else
+                       reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+                                                     (vf->vf_id))
+                                                    + (i - 1));
+               reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+                      I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+               wr32(hw, reg_idx, reg);
+               i40e_flush(hw);
+       }
+       /* reset some of the state variables that keep
+        * track of the resources
+        */
+       vf->num_queue_pairs = 0;
+       vf->vf_states = 0;
+       clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+}
+
+/**
+ * i40e_alloc_vf_res
+ * @vf: pointer to the VF info
+ *
+ * allocate VF resources
+ **/
+static int i40e_alloc_vf_res(struct i40e_vf *vf)
+{
+       struct i40e_pf *pf = vf->pf;
+       int total_queue_pairs = 0;
+       int ret;
+
+       /* allocate hw vsi context & associated resources */
+       ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
+       if (ret)
+               goto error_alloc;
+       total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+       set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+       /* store the total qps number for the runtime
+        * VF req validation
+        */
+       vf->num_queue_pairs = total_queue_pairs;
+
+       /* VF is now completely initialized */
+       set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+
+error_alloc:
+       if (ret)
+               i40e_free_vf_res(vf);
+
+       return ret;
+}
+
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_MASK 0x20
+/**
+ * i40e_quiesce_vf_pci
+ * @vf: pointer to the VF structure
+ *
+ * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
+ * if the transactions never clear.
+ **/
+static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       int vf_abs_id, i;
+       u32 reg;
+
+       vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+       wr32(hw, I40E_PF_PCI_CIAA,
+            VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+       for (i = 0; i < 100; i++) {
+               reg = rd32(hw, I40E_PF_PCI_CIAD);
+               if ((reg & VF_TRANS_PENDING_MASK) == 0)
+                       return 0;
+               udelay(1);
+       }
+       return -EIO;
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the VF structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the VF
+ **/
+void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       bool rsd = false;
+       int i;
+       u32 reg;
+
+       if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
+               return;
+
+       /* warn the VF */
+       clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+       /* In the case of a VFLR, the HW has already reset the VF and we
+        * just need to clean up, so don't hit the VFRTRIG register.
+        */
+       if (!flr) {
+               /* reset VF using VPGEN_VFRTRIG reg */
+               reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+               reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+               wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+               i40e_flush(hw);
+       }
+
+       if (i40e_quiesce_vf_pci(vf))
+               dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
+                       vf->vf_id);
+
+       /* poll VPGEN_VFRSTAT reg to make sure
+        * that reset is complete
+        */
+       for (i = 0; i < 10; i++) {
+               /* VF reset requires driver to first reset the VF and then
+                * poll the status register to make sure that the reset
+                * completed successfully. Due to internal HW FIFO flushes,
+                * we must wait 10ms before the register will be valid.
+                */
+               usleep_range(10000, 20000);
+               reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+               if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+                       rsd = true;
+                       break;
+               }
+       }
+
+       if (flr)
+               usleep_range(10000, 20000);
+
+       if (!rsd)
+               dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+                       vf->vf_id);
+       usleep_range(10000, 20000);
+       wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+       /* clear the reset bit in the VPGEN_VFRTRIG reg */
+       reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+       reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+       wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+       /* On initial reset, we won't have any queues */
+       if (vf->lan_vsi_idx == 0)
+               goto complete_reset;
+
+       i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
+complete_reset:
+       /* reallocate VF resources to reset the VSI state */
+       i40e_free_vf_res(vf);
+       if (!i40e_alloc_vf_res(vf)) {
+               i40e_enable_vf_mappings(vf);
+               set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+               clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+       }
+       /* tell the VF the reset is done */
+       wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+       i40e_flush(hw);
+       clear_bit(__I40E_VF_DISABLE, &pf->state);
+}
+
+/**
+ * i40e_free_vfs
+ * @pf: pointer to the PF structure
+ *
+ * free VF resources
+ **/
+void i40e_free_vfs(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg_idx, bit_idx;
+       int i, tmp, vf_id;
+
+       if (!pf->vf)
+               return;
+       while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
+               usleep_range(1000, 2000);
+
+       for (i = 0; i < pf->num_alloc_vfs; i++)
+               if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+                       i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+                                              false);
+
+       /* Disable IOV before freeing resources. This lets any VF drivers
+        * running in the host get themselves cleaned up before we yank
+        * the carpet out from underneath their feet.
+        */
+       if (!pci_vfs_assigned(pf->pdev))
+               pci_disable_sriov(pf->pdev);
+       else
+               dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
+       msleep(20); /* let any messages in transit get finished up */
+
+       /* free up VF resources */
+       tmp = pf->num_alloc_vfs;
+       pf->num_alloc_vfs = 0;
+       for (i = 0; i < tmp; i++) {
+               if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+                       i40e_free_vf_res(&pf->vf[i]);
+               /* disable qp mappings */
+               i40e_disable_vf_mappings(&pf->vf[i]);
+       }
+
+       kfree(pf->vf);
+       pf->vf = NULL;
+
+       /* This check is for when the driver is unloaded while VFs are
+        * assigned. Setting the number of VFs to 0 through sysfs is caught
+        * before this function ever gets called.
+        */
+       if (!pci_vfs_assigned(pf->pdev)) {
+               /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
+                * work correctly when SR-IOV gets re-enabled.
+                */
+               for (vf_id = 0; vf_id < tmp; vf_id++) {
+                       reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+                       bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+               }
+       }
+       clear_bit(__I40E_VF_DISABLE, &pf->state);
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * i40e_alloc_vfs
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of vfs to allocate
+ *
+ * allocate VF resources
+ **/
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+{
+       struct i40e_vf *vfs;
+       int i, ret = 0;
+
+       /* Disable interrupt 0 so we don't try to handle the VFLR. */
+       i40e_irq_dynamic_disable_icr0(pf);
+
+       /* Check to see if we're just allocating resources for extant VFs */
+       if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
+               ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+               if (ret) {
+                       pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+                       pf->num_alloc_vfs = 0;
+                       goto err_iov;
+               }
+       }
+       /* allocate memory */
+       vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
+       if (!vfs) {
+               ret = -ENOMEM;
+               goto err_alloc;
+       }
+       pf->vf = vfs;
+
+       /* apply default profile */
+       for (i = 0; i < num_alloc_vfs; i++) {
+               vfs[i].pf = pf;
+               vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
+               vfs[i].vf_id = i;
+
+               /* assign default capabilities */
+               set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+               vfs[i].spoofchk = true;
+#endif
+               /* VF resources get allocated during reset */
+               i40e_reset_vf(&vfs[i], false);
+
+       }
+       pf->num_alloc_vfs = num_alloc_vfs;
+
+err_alloc:
+       if (ret)
+               i40e_free_vfs(pf);
+err_iov:
+       /* Re-enable interrupt 0. */
+       i40e_irq_dynamic_enable_icr0(pf);
+       return ret;
+}
+
+#endif
+#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+/**
+ * i40e_pci_sriov_enable
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs
+ **/
+static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+       struct i40e_pf *pf = pci_get_drvdata(pdev);
+       int pre_existing_vfs = pci_num_vf(pdev);
+       int err = 0;
+
+       if (test_bit(__I40E_TESTING, &pf->state)) {
+               dev_warn(&pdev->dev,
+                        "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+               err = -EPERM;
+               goto err_out;
+       }
+
+       if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+               i40e_free_vfs(pf);
+       else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+               goto out;
+
+       if (num_vfs > pf->num_req_vfs) {
+               dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
+                        num_vfs, pf->num_req_vfs);
+               err = -EPERM;
+               goto err_out;
+       }
+
+       dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
+       err = i40e_alloc_vfs(pf, num_vfs);
+       if (err) {
+               dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
+               goto err_out;
+       }
+
+out:
+       return num_vfs;
+
+err_out:
+       return err;
+#endif
+       return 0;
+}
+
+/**
+ * i40e_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+       struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+       if (num_vfs) {
+               if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset_safe(pf,
+                                          BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               }
+               return i40e_pci_sriov_enable(pdev, num_vfs);
+       }
+       if (!pci_vfs_assigned(pdev)) {
+               i40e_free_vfs(pf);
+               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+               i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+       } else {
+               dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+#endif
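
    For reference, this hook is wired to the standard PCI SR-IOV sysfs
    interface: writing a VF count to
    /sys/class/net/<iface>/device/sriov_numvfs invokes
    i40e_pci_sriov_configure() with that value, and writing 0 frees the
    VFs, provided none are currently assigned to a VM.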
+
+/***********************virtual channel routines******************/
+
+/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+                                 u32 v_retval, u8 *msg, u16 msglen)
+{
+       struct i40e_pf *pf;
+       struct i40e_hw *hw;
+       int abs_vf_id;
+       i40e_status aq_ret;
+
+       /* validate the request */
+       if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+               return -EINVAL;
+
+       pf = vf->pf;
+       hw = &pf->hw;
+       abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+       /* single place to detect unsuccessful return values */
+       if (v_retval) {
+               vf->num_invalid_msgs++;
+               dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
+                       v_opcode, v_retval);
+               if (vf->num_invalid_msgs >
+                   I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
+                       dev_err(&pf->pdev->dev,
+                               "Number of invalid messages exceeded for VF %d\n",
+                               vf->vf_id);
+                       dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+                       set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+               }
+       } else {
+               vf->num_valid_msgs++;
+               /* reset the invalid counter, if a valid message is received. */
+               vf->num_invalid_msgs = 0;
+       }
+
+       aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,  v_opcode, v_retval,
+                                       msg, msglen, NULL);
+       if (aq_ret) {
+               dev_err(&pf->pdev->dev,
+                       "Unable to send the message to VF %d aq_err %d\n",
+                       vf->vf_id, pf->hw.aq.asq_last_status);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_vc_send_resp_to_vf
+ * @vf: pointer to the VF info
+ * @opcode: operation code
+ * @retval: return value
+ *
+ * send resp msg to VF
+ **/
+static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+                                  enum i40e_virtchnl_ops opcode,
+                                  i40e_status retval)
+{
+       return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+}
+
+/**
+ * i40e_vc_get_version_msg
+ * @vf: pointer to the VF info
+ *
+ * called from the VF to request the API version used by the PF
+ **/
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
+{
+       struct i40e_virtchnl_version_info info = {
+               I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
+       };
+
+       vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+       /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+       if (VF_IS_V10(vf))
+               info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+       return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+                                     I40E_SUCCESS, (u8 *)&info,
+                                     sizeof(struct
+                                            i40e_virtchnl_version_info));
+}
+
+/**
+ * i40e_vc_get_vf_resources_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ **/
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+{
+       struct i40e_virtchnl_vf_resource *vfres = NULL;
+       struct i40e_pf *pf = vf->pf;
+       i40e_status aq_ret = 0;
+       struct i40e_vsi *vsi;
+       int i = 0, len = 0;
+       int num_vsis = 1;
+       int ret;
+
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto err;
+       }
+
+       len = (sizeof(struct i40e_virtchnl_vf_resource) +
+              sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
+
+       vfres = kzalloc(len, GFP_KERNEL);
+       if (!vfres) {
+               aq_ret = I40E_ERR_NO_MEMORY;
+               len = 0;
+               goto err;
+       }
+       if (VF_IS_V11(vf))
+               vf->driver_caps = *(u32 *)msg;
+       else
+               vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+                                 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+                                 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+       vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (!vsi->info.pvid)
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+       vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+
+       if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
+       vfres->num_vsis = num_vsis;
+       vfres->num_queue_pairs = vf->num_queue_pairs;
+       vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+       if (vf->lan_vsi_idx) {
+               vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
+               vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
+               vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+               /* VFs only use TC 0 */
+               vfres->vsi_res[i].qset_handle
+                                         = LE16_TO_CPU(vsi->info.qs_handle[0]);
+               ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+                               vf->default_lan_addr.addr);
+               i++;
+       }
+       set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+err:
+       /* send the response back to the VF */
+       ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+                                    aq_ret, (u8 *)vfres, len);
+
+       kfree(vfres);
+       return ret;
+}
+
+/**
+ * i40e_vc_reset_vf_msg
+ * @vf: pointer to the VF info
+ *
+ * called from the VF to reset itself; unlike other virtchnl
+ * messages, the PF driver doesn't send a response back to the VF
+ **/
+static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+{
+       if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+               i40e_reset_vf(vf, false);
+}
+
+/**
+ * i40e_vc_config_promiscuous_mode_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the VF to configure the promiscuous mode of
+ * VF VSIs
+ **/
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
+                                              u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_promisc_info *info =
+           (struct i40e_virtchnl_promisc_info *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       bool allmulti = false;
+       struct i40e_vsi *vsi;
+       i40e_status aq_ret;
+
+       vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+           !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+           (vsi->type != I40E_VSI_FCOE)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+       if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+               allmulti = true;
+       aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+                                                      allmulti, NULL);
+
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf,
+                                      I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+                                      aq_ret);
+}
+
+/**
+ * i40e_vc_config_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the VF to configure the rx/tx
+ * queues
+ **/
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_vsi_queue_config_info *qci =
+           (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+       struct i40e_virtchnl_queue_pair_info *qpi;
+       struct i40e_pf *pf = vf->pf;
+       u16 vsi_id, vsi_queue_id;
+       i40e_status aq_ret = 0;
+       int i;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi_id = qci->vsi_id;
+       if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+       for (i = 0; i < qci->num_queue_pairs; i++) {
+               qpi = &qci->qpair[i];
+               vsi_queue_id = qpi->txq.queue_id;
+               if ((qpi->txq.vsi_id != vsi_id) ||
+                   (qpi->rxq.vsi_id != vsi_id) ||
+                   (qpi->rxq.queue_id != vsi_queue_id) ||
+                   !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
+                       aq_ret = I40E_ERR_PARAM;
+                       goto error_param;
+               }
+
+               if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
+                                            &qpi->rxq) ||
+                   i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
+                                            &qpi->txq)) {
+                       aq_ret = I40E_ERR_PARAM;
+                       goto error_param;
+               }
+       }
+
+       /* set vsi num_queue_pairs in use to num configured by VF */
+       pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
+
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+                                      aq_ret);
+}
+
+/**
+ * i40e_vc_config_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the VF to configure the irq to
+ * queue map
+ **/
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_irq_map_info *irqmap_info =
+           (struct i40e_virtchnl_irq_map_info *)msg;
+       struct i40e_virtchnl_vector_map *map;
+       u16 vsi_id, vsi_queue_id, vector_id;
+       i40e_status aq_ret = 0;
+       unsigned long tempmap;
+       int i;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < irqmap_info->num_vectors; i++) {
+               map = &irqmap_info->vecmap[i];
+
+               vector_id = map->vector_id;
+               vsi_id = map->vsi_id;
+               /* validate msg params */
+               if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
+                   !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+                       aq_ret = I40E_ERR_PARAM;
+                       goto error_param;
+               }
+
+               /* look out for invalid queue indexes */
+               tempmap = map->rxq_map;
+               for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
+                       if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+                                                     vsi_queue_id)) {
+                               aq_ret = I40E_ERR_PARAM;
+                               goto error_param;
+                       }
+               }
+
+               tempmap = map->txq_map;
+               for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
+                       if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+                                                     vsi_queue_id)) {
+                               aq_ret = I40E_ERR_PARAM;
+                               goto error_param;
+                       }
+               }
+
+               i40e_config_irq_link_list(vf, vsi_id, map);
+       }
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+                                      aq_ret);
+}
+
+/**
+ * i40e_vc_enable_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the VF to enable all or specific queue(s)
+ **/
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_queue_select *vqs =
+           (struct i40e_virtchnl_queue_select *)msg;
+       struct i40e_pf *pf = vf->pf;
+       u16 vsi_id = vqs->vsi_id;
+       i40e_status aq_ret = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!vqs->rx_queues && !vqs->tx_queues) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
+               aq_ret = I40E_ERR_TIMEOUT;
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+                                      aq_ret);
+}
+
+/**
+ * i40e_vc_disable_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the VF to disable all or specific
+ * queue(s)
+ **/
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_queue_select *vqs =
+           (struct i40e_virtchnl_queue_select *)msg;
+       struct i40e_pf *pf = vf->pf;
+       i40e_status aq_ret = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!vqs->rx_queues && !vqs->tx_queues) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
+               aq_ret = I40E_ERR_TIMEOUT;
+
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+                                      aq_ret);
+}
+
+/**
+ * i40e_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the VF to get vsi stats
+ **/
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_queue_select *vqs =
+           (struct i40e_virtchnl_queue_select *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_eth_stats stats;
+       i40e_status aq_ret = 0;
+       struct i40e_vsi *vsi;
+
+       memset(&stats, 0, sizeof(struct i40e_eth_stats));
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (!vsi) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+       i40e_update_eth_stats(vsi);
+       stats = vsi->eth_stats;
+
+error_param:
+       /* send the response back to the VF */
+       return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
+                                     (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * i40e_check_vf_permission
+ * @vf: pointer to the VF info
+ * @macaddr: pointer to the MAC Address being checked
+ *
+ * Check if the VF has permission to add or delete unicast MAC address
+ * filters and return error code -EPERM if not.  Then check if the
+ * address filter requested is broadcast or zero and if so return
+ * an invalid MAC address error code.
+ **/
+static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
+{
+       struct i40e_pf *pf = vf->pf;
+       int ret = 0;
+
+       if (is_broadcast_ether_addr(macaddr) ||
+           is_zero_ether_addr(macaddr)) {
+               dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
+               ret = I40E_ERR_INVALID_MAC_ADDR;
+       } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+                  !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
+               /* If the host VMM administrator has set the VF MAC address
+                * administratively via the ndo_set_vf_mac command then deny
+                * permission to the VF to add or delete unicast MAC addresses.
+                * The VF may request to set the MAC address filter already
+                * assigned to it so do not return an error in that case.
+                */
+               dev_err(&pf->pdev->dev,
+                       "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+               ret = -EPERM;
+       }
+       return ret;
+}
+
+/**
+ * i40e_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * add guest mac address filter
+ **/
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_ether_addr_list *al =
+           (struct i40e_virtchnl_ether_addr_list *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = NULL;
+       u16 vsi_id = al->vsi_id;
+       i40e_status ret = 0;
+       int i;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+           !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+               ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < al->num_elements; i++) {
+               ret = i40e_check_vf_permission(vf, al->list[i].addr);
+               if (ret)
+                       goto error_param;
+       }
+       vsi = pf->vsi[vf->lan_vsi_idx];
+
+       /* Lock once, because every function called inside the loop accesses
+        * the VSI's MAC filter list, which must be protected by the same lock.
+        */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
+       /* add new addresses to the list */
+       for (i = 0; i < al->num_elements; i++) {
+               struct i40e_mac_filter *f;
+
+               f = i40e_find_mac(vsi, al->list[i].addr, true, false);
+               if (!f) {
+                       if (i40e_is_vsi_in_vlan(vsi))
+                               f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
+                                                        true, false);
+                       else
+                               f = i40e_add_filter(vsi, al->list[i].addr, -1,
+                                                   true, false);
+               }
+
+               if (!f) {
+                       dev_err(&pf->pdev->dev,
+                               "Unable to add VF MAC filter\n");
+                       ret = I40E_ERR_PARAM;
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       goto error_param;
+               }
+       }
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       /* program the updated filter list */
+       ret = i40e_sync_vsi_filters(vsi, false);
+       if (ret)
+               dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+                       vf->vf_id, ret);
+
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+                                      ret);
+}
+
+/**
+ * i40e_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove guest mac address filter
+ **/
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_ether_addr_list *al =
+           (struct i40e_virtchnl_ether_addr_list *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = NULL;
+       u16 vsi_id = al->vsi_id;
+       i40e_status ret = 0;
+       int i;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+           !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+               ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < al->num_elements; i++) {
+               if (is_broadcast_ether_addr(al->list[i].addr) ||
+                   is_zero_ether_addr(al->list[i].addr)) {
+                       dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
+                               al->list[i].addr);
+                       ret = I40E_ERR_INVALID_MAC_ADDR;
+                       goto error_param;
+               }
+       }
+       vsi = pf->vsi[vf->lan_vsi_idx];
+
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+       /* delete addresses from the list */
+       for (i = 0; i < al->num_elements; i++)
+               i40e_del_filter(vsi, al->list[i].addr,
+                               I40E_VLAN_ANY, true, false);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       /* program the updated filter list */
+       ret = i40e_sync_vsi_filters(vsi, false);
+       if (ret)
+               dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+                       vf->vf_id, ret);
+
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+                                      ret);
+}
+
+/**
+ * i40e_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * program guest vlan id
+ **/
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_vlan_filter_list *vfl =
+           (struct i40e_virtchnl_vlan_filter_list *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = NULL;
+       u16 vsi_id = vfl->vsi_id;
+       i40e_status aq_ret = 0;
+       int i;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+           !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < vfl->num_elements; i++) {
+               if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+                       aq_ret = I40E_ERR_PARAM;
+                       dev_err(&pf->pdev->dev,
+                               "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+                       goto error_param;
+               }
+       }
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (vsi->info.pvid) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       i40e_vlan_stripping_enable(vsi);
+       for (i = 0; i < vfl->num_elements; i++) {
+               /* add new VLAN filter */
+               int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+
+               if (ret)
+                       dev_err(&pf->pdev->dev,
+                               "Unable to add VF vlan filter %d, error %d\n",
+                               vfl->vlan_id[i], ret);
+       }
+
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove programmed guest vlan id
+ **/
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_virtchnl_vlan_filter_list *vfl =
+           (struct i40e_virtchnl_vlan_filter_list *)msg;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi = NULL;
+       u16 vsi_id = vfl->vsi_id;
+       i40e_status aq_ret = 0;
+       int i;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+           !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < vfl->num_elements; i++) {
+               if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+                       aq_ret = I40E_ERR_PARAM;
+                       goto error_param;
+               }
+       }
+
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (vsi->info.pvid) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       for (i = 0; i < vfl->num_elements; i++) {
+               int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+
+               if (ret)
+                       dev_err(&pf->pdev->dev,
+                               "Unable to delete VF vlan filter %d, error %d\n",
+                               vfl->vlan_id[i], ret);
+       }
+
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_validate_vf_msg
+ * @vf: pointer to the VF info
+ * @v_opcode: message opcode
+ * @v_retval: message return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format and length
+ **/
+static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
+                                  u32 v_retval, u8 *msg, u16 msglen)
+{
+       bool err_msg_format = false;
+       int valid_len;
+
+       /* Check if VF is disabled. */
+       if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
+               return I40E_ERR_PARAM;
+
+       /* Validate message length. */
+       switch (v_opcode) {
+       case I40E_VIRTCHNL_OP_VERSION:
+               valid_len = sizeof(struct i40e_virtchnl_version_info);
+               break;
+       case I40E_VIRTCHNL_OP_RESET_VF:
+               valid_len = 0;
+               break;
+       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+               if (VF_IS_V11(vf))
+                       valid_len = sizeof(u32);
+               else
+                       valid_len = 0;
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+               valid_len = sizeof(struct i40e_virtchnl_txq_info);
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+               valid_len = sizeof(struct i40e_virtchnl_rxq_info);
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+               valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
+               if (msglen >= valid_len) {
+                       struct i40e_virtchnl_vsi_queue_config_info *vqc =
+                           (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+                       valid_len += (vqc->num_queue_pairs *
+                                     sizeof(struct
+                                            i40e_virtchnl_queue_pair_info));
+                       if (vqc->num_queue_pairs == 0)
+                               err_msg_format = true;
+               }
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+               valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
+               if (msglen >= valid_len) {
+                       struct i40e_virtchnl_irq_map_info *vimi =
+                           (struct i40e_virtchnl_irq_map_info *)msg;
+                       valid_len += (vimi->num_vectors *
+                                     sizeof(struct i40e_virtchnl_vector_map));
+                       if (vimi->num_vectors == 0)
+                               err_msg_format = true;
+               }
+               break;
+       case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+       case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+               valid_len = sizeof(struct i40e_virtchnl_queue_select);
+               break;
+       case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+       case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+               valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
+               if (msglen >= valid_len) {
+                       struct i40e_virtchnl_ether_addr_list *veal =
+                           (struct i40e_virtchnl_ether_addr_list *)msg;
+                       valid_len += veal->num_elements *
+                           sizeof(struct i40e_virtchnl_ether_addr);
+                       if (veal->num_elements == 0)
+                               err_msg_format = true;
+               }
+               break;
+       case I40E_VIRTCHNL_OP_ADD_VLAN:
+       case I40E_VIRTCHNL_OP_DEL_VLAN:
+               valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
+               if (msglen >= valid_len) {
+                       struct i40e_virtchnl_vlan_filter_list *vfl =
+                           (struct i40e_virtchnl_vlan_filter_list *)msg;
+                       valid_len += vfl->num_elements * sizeof(u16);
+                       if (vfl->num_elements == 0)
+                               err_msg_format = true;
+               }
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+               valid_len = sizeof(struct i40e_virtchnl_promisc_info);
+               break;
+       case I40E_VIRTCHNL_OP_GET_STATS:
+               valid_len = sizeof(struct i40e_virtchnl_queue_select);
+               break;
+       /* These are always errors coming from the VF. */
+       case I40E_VIRTCHNL_OP_EVENT:
+       case I40E_VIRTCHNL_OP_UNKNOWN:
+       default:
+               return -EPERM;
+       }
+       /* few more checks */
+       if ((valid_len != msglen) || (err_msg_format)) {
+               i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+               return -EINVAL;
+       } else {
+               return 0;
+       }
+}
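+
+/* Worked length check for illustration (numbers follow the math above):
+ * an I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS message carrying three addresses
+ * must arrive with exactly
+ *
+ *   msglen == sizeof(struct i40e_virtchnl_ether_addr_list) +
+ *             3 * sizeof(struct i40e_virtchnl_ether_addr);
+ *
+ * anything else is rejected with I40E_ERR_PARAM before dispatch.
+ */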
+
+/**
+ * i40e_vc_process_vf_msg
+ * @pf: pointer to the PF structure
+ * @vf_id: source VF id
+ * @v_opcode: message opcode
+ * @v_retval: message return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the common aeq/arq handler to
+ * process request from VF
+ **/
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+                          u32 v_retval, u8 *msg, u16 msglen)
+{
+       struct i40e_hw *hw = &pf->hw;
+       unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+       struct i40e_vf *vf;
+       int ret;
+
+       pf->vf_aq_requests++;
+       if (local_vf_id >= pf->num_alloc_vfs)
+               return -EINVAL;
+       vf = &(pf->vf[local_vf_id]);
+       /* perform basic checks on the msg */
+       ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
+
+       if (ret) {
+               dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
+                       local_vf_id, v_opcode, msglen);
+               return ret;
+       }
+
+       switch (v_opcode) {
+       case I40E_VIRTCHNL_OP_VERSION:
+               ret = i40e_vc_get_version_msg(vf, msg);
+               break;
+       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+               ret = i40e_vc_get_vf_resources_msg(vf, msg);
+               break;
+       case I40E_VIRTCHNL_OP_RESET_VF:
+               i40e_vc_reset_vf_msg(vf);
+               ret = 0;
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+               ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+               ret = i40e_vc_config_queues_msg(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+               ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+               ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+               i40e_vc_notify_vf_link_state(vf);
+               break;
+       case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+               ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+               ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+               ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_ADD_VLAN:
+               ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_DEL_VLAN:
+               ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_GET_STATS:
+               ret = i40e_vc_get_stats_msg(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_UNKNOWN:
+       default:
+               dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
+                       v_opcode, local_vf_id);
+               ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
+                                             I40E_ERR_NOT_IMPLEMENTED);
+               break;
+       }
+
+       return ret;
+}
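+
+/* Minimal sketch of the expected call site; the actual caller is the
+ * admin receive queue handler in i40e_main.c, and the event field names
+ * here assume the ARQ event layout used by that handler:
+ *
+ *   case i40e_aqc_opc_send_msg_to_pf:
+ *           ret = i40e_vc_process_vf_msg(pf,
+ *                           le16_to_cpu(event.desc.retval),
+ *                           le32_to_cpu(event.desc.cookie_high),
+ *                           le32_to_cpu(event.desc.cookie_low),
+ *                           event.msg_buf, event.msg_len);
+ */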
+
+/**
+ * i40e_vc_process_vflr_event
+ * @pf: pointer to the PF structure
+ *
+ * called from the VFLR irq handler to
+ * free up VF resources and state variables
+ **/
+int i40e_vc_process_vflr_event(struct i40e_pf *pf)
+{
+       u32 reg, reg_idx, bit_idx, vf_id;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_vf *vf;
+
+       if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+               return 0;
+
+       /* re-enable vflr interrupt cause */
+       reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+       reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+       wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+       i40e_flush(hw);
+
+       clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+       for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+               reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+               bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+               /* read GLGEN_VFLRSTAT register to find the FLR'd VFs */
+               vf = &pf->vf[vf_id];
+               reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
+               if (reg & BIT(bit_idx)) {
+                       /* clear the bit in GLGEN_VFLRSTAT */
+                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+
+                       if (!test_bit(__I40E_DOWN, &pf->state))
+                               i40e_reset_vf(vf, true);
+               }
+       }
+
+       return 0;
+}
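+
+/* The index arithmetic above, worked through with illustrative values:
+ * with func_caps.vf_base_id == 64 and vf_id == 5 the absolute VF number
+ * is 69, so reg_idx = 69 / 32 = 2 and bit_idx = 69 % 32 = 5, i.e. bit 5
+ * of I40E_GLGEN_VFLRSTAT(2) reports the FLR for this VF.
+ */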
+
+#ifdef IFLA_VF_MAX
+
+/**
+ * i40e_ndo_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: mac address
+ *
+ * program VF mac address
+ **/
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_mac_filter *f;
+       struct i40e_vf *vf;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev,
+                       "Invalid VF Identifier %d\n", vf_id);
+               ret = -EINVAL;
+               goto error_param;
+       }
+
+       vf = &(pf->vf[vf_id]);
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+               dev_err(&pf->pdev->dev,
+                       "Uninitialized VF %d\n", vf_id);
+               ret = -EINVAL;
+               goto error_param;
+       }
+
+       if (!is_valid_ether_addr(mac)) {
+               dev_err(&pf->pdev->dev,
+                       "Invalid VF ethernet address\n");
+               ret = -EINVAL;
+               goto error_param;
+       }
+
+       /* Lock once because below invoked function add/del_filter requires
+        * mac_filter_list_lock to be held
+        */
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+
+       /* delete the temporary mac address */
+       i40e_del_filter(vsi, vf->default_lan_addr.addr,
+                       vf->port_vlan_id ? vf->port_vlan_id : -1,
+                       true, false);
+
+       /* Delete all the filters for this VSI - we're going to kill it
+        * anyway.
+        */
+       list_for_each_entry(f, &vsi->mac_filter_list, list)
+               i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
+
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
+       /* program mac filter */
+       if (i40e_sync_vsi_filters(vsi, false)) {
+               dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+               ret = -EIO;
+               goto error_param;
+       }
+       ether_addr_copy(vf->default_lan_addr.addr, mac);
+       vf->pf_set_mac = true;
+       /* Force the VF driver stop so it has to reload with new MAC address */
+       i40e_vc_disable_vf(pf, vf);
+       dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
+
+error_param:
+       return ret;
+}
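+
+/* This handler is reached through the IFLA_VF_MAC netlink attribute;
+ * with iproute2 that is typically (device name and address illustrative):
+ *
+ *   ip link set eth0 vf 0 mac 52:54:00:12:34:56
+ */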
+
+/**
+ * i40e_ndo_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN identifier
+ * @qos: priority setting
+ *
+ * program VF vlan id and/or qos
+ **/
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+                             int vf_id, u16 vlan_id, u8 qos)
+{
+       u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       bool is_vsi_in_vlan = false;
+       struct i40e_vsi *vsi;
+       struct i40e_vf *vf;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+               ret = -EINVAL;
+               goto error_pvid;
+       }
+
+       if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
+               dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+               ret = -EINVAL;
+               goto error_pvid;
+       }
+
+       vf = &(pf->vf[vf_id]);
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+               dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+               ret = -EINVAL;
+               goto error_pvid;
+       }
+
+       if (le16_to_cpu(vsi->info.pvid) == vlanprio)
+               /* duplicate request, so just return success */
+               goto error_pvid;
+
+       spin_lock_bh(&vsi->mac_filter_list_lock);
+       is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
+       spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+       if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
+               dev_err(&pf->pdev->dev,
+                       "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
+                       vf_id);
+               /* Administrator error - knock the VF offline until the
+                * administrator reconfigures the network correctly and
+                * then reloads the VF driver.
+                */
+               i40e_vc_disable_vf(pf, vf);
+       }
+
+       /* Check for condition where there was already a port VLAN ID
+        * filter set and now it is being deleted by setting it to zero.
+        * Additionally check for the condition where there was a port
+        * VLAN but now there is a new and different port VLAN being set.
+        * Before deleting all the old VLAN filters we must add new ones
+        * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
+        * MAC addresses deleted.
+        */
+       if ((!(vlan_id || qos) ||
+           vlanprio != le16_to_cpu(vsi->info.pvid)) &&
+           vsi->info.pvid)
+               ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
+
+       if (vsi->info.pvid) {
+               /* kill old VLAN */
+               ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+                                              VLAN_VID_MASK));
+               if (ret) {
+                       dev_info(&vsi->back->pdev->dev,
+                                "remove VLAN failed, ret=%d, aq_err=%d\n",
+                                ret, pf->hw.aq.asq_last_status);
+               }
+       }
+       if (vlan_id || qos)
+               ret = i40e_vsi_add_pvid(vsi, vlanprio);
+       else
+               i40e_vsi_remove_pvid(vsi);
+
+       if (vlan_id) {
+               dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+                        vlan_id, qos, vf_id);
+
+               /* add new VLAN filter */
+               ret = i40e_vsi_add_vlan(vsi, vlan_id);
+               if (ret) {
+                       dev_info(&vsi->back->pdev->dev,
+                                "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+                                vsi->back->hw.aq.asq_last_status);
+                       goto error_pvid;
+               }
+               /* Kill non-vlan MAC filters - ignore error return since
+                * there might not be any non-vlan MAC filters.
+                */
+               i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
+       }
+
+       if (ret) {
+               dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
+               goto error_pvid;
+       }
+       /* The Port VLAN needs to be saved across resets the same as the
+        * default LAN MAC address.
+        */
+       vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+       ret = 0;
+
+error_pvid:
+       return ret;
+}
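+
+/* Worked encoding with illustrative values: for vlan_id == 100 and
+ * qos == 3,
+ *
+ *   vlanprio = 100 | (3 << I40E_VLAN_PRIORITY_SHIFT)
+ *            = 0x0064 | 0x3000 = 0x3064,
+ *
+ * which is what lands in vsi->info.pvid and is later split back out
+ * with I40E_VLAN_MASK and I40E_PRIORITY_MASK.
+ */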
+
+#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */
+#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */
+/**
+ * i40e_ndo_set_vf_bw
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: minimum Tx rate in Mbps (kernels with
+ *               HAVE_NDO_SET_VF_MIN_MAX_TX_RATE only)
+ * @max_tx_rate: maximum Tx rate in Mbps
+ *
+ * configure VF Tx rate
+ **/
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+                      int max_tx_rate)
+#else
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int max_tx_rate)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi;
+       struct i40e_vf *vf;
+       int speed = 0;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
+               ret = -EINVAL;
+               goto error;
+       }
+
+       vf = &(pf->vf[vf_id]);
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+               dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
+               ret = -EINVAL;
+               goto error;
+       }
+
+       switch (pf->hw.phy.link_info.link_speed) {
+       case I40E_LINK_SPEED_40GB:
+               speed = 40000;
+               break;
+       case I40E_LINK_SPEED_10GB:
+               speed = 10000;
+               break;
+       case I40E_LINK_SPEED_1GB:
+               speed = 1000;
+               break;
+       default:
+               break;
+       }
+
+       if (max_tx_rate > speed) {
+               dev_err(&pf->pdev->dev, "Invalid tx rate %d specified for VF %d.",
+                       max_tx_rate, vf->vf_id);
+               ret = -EINVAL;
+               goto error;
+       }
+       if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
+               dev_warn(&pf->pdev->dev, "Setting tx rate to minimum usable value of 50Mbps.\n");
+               max_tx_rate = 50;
+       }
+
+       /* Tx rate credits are in units of 50Mbps; 0 disables the limit */
+       ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+                                         max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+                                         I40E_MAX_BW_INACTIVE_ACCUM,
+                                         NULL);
+       if (ret) {
+               dev_err(&pf->pdev->dev, "Unable to set tx rate, error code %d.\n",
+                       ret);
+               ret = -EIO;
+               goto error;
+       }
+       vf->tx_rate = max_tx_rate;
+error:
+       return ret;
+}
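+
+/* Credit math worked through with an illustrative rate: a request of
+ * 300Mbps becomes 300 / I40E_BW_CREDIT_DIVISOR = 6 credits in the
+ * i40e_aq_config_vsi_bw_limit() call; requests of 1-49Mbps are first
+ * rounded up to the 50Mbps minimum, and 0 leaves the limiter disabled.
+ */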
+
+/**
+ * i40e_ndo_enable_vf
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @enable: true to enable, false to disable
+ *
+ * enable/disable VF
+ **/
+int i40e_ndo_enable_vf(struct net_device *netdev, int vf_id, bool enable)
+{
+       return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ndo_get_vf_config
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ **/
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+                          int vf_id, struct ifla_vf_info *ivi)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_vf *vf;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+               ret = -EINVAL;
+               goto error_param;
+       }
+
+       vf = &(pf->vf[vf_id]);
+       /* first vsi is always the LAN vsi */
+       vsi = pf->vsi[vf->lan_vsi_idx];
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+               dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+               ret = -EINVAL;
+               goto error_param;
+       }
+
+       ivi->vf = vf_id;
+
+       ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
+
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+       ivi->max_tx_rate = vf->tx_rate;
+       ivi->min_tx_rate = 0;
+#else
+       ivi->tx_rate = vf->tx_rate;
+#endif
+       ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
+       ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
+                  I40E_VLAN_PRIORITY_SHIFT;
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+       if (!vf->link_forced)
+               ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+       else if (vf->link_up)
+               ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+       else
+               ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+#endif
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+       ivi->spoofchk = vf->spoofchk;
+#endif
+       ret = 0;
+
+error_param:
+       return ret;
+}
+
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+/**
+ * i40e_ndo_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_virtchnl_pf_event pfe;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_vf *vf;
+       int abs_vf_id;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+               ret = -EINVAL;
+               goto error_out;
+       }
+
+       vf = &pf->vf[vf_id];
+       abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+       pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+       pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+
+       switch (link) {
+       case IFLA_VF_LINK_STATE_AUTO:
+               vf->link_forced = false;
+               pfe.event_data.link_event.link_status =
+                       pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+               pfe.event_data.link_event.link_speed =
+                       pf->hw.phy.link_info.link_speed;
+               break;
+       case IFLA_VF_LINK_STATE_ENABLE:
+               vf->link_forced = true;
+               vf->link_up = true;
+               pfe.event_data.link_event.link_status = true;
+               pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+               break;
+       case IFLA_VF_LINK_STATE_DISABLE:
+               vf->link_forced = true;
+               vf->link_up = false;
+               pfe.event_data.link_event.link_status = false;
+               pfe.event_data.link_event.link_speed = 0;
+               break;
+       default:
+               ret = -EINVAL;
+               goto error_out;
+       }
+       /* Notify the VF of its new link state */
+       i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+                              I40E_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL);
+
+error_out:
+       return ret;
+}
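+
+/* The iproute2 invocations below (device name illustrative) map onto
+ * the switch above:
+ *
+ *   ip link set eth0 vf 0 state auto      -> IFLA_VF_LINK_STATE_AUTO
+ *   ip link set eth0 vf 0 state enable    -> IFLA_VF_LINK_STATE_ENABLE
+ *   ip link set eth0 vf 0 state disable   -> IFLA_VF_LINK_STATE_DISABLE
+ */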
+
+#endif /* HAVE_NDO_SET_VF_LINK_STATE */
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+/**
+ * i40e_ndo_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @enable: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ **/
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_vsi_context ctxt;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_vf *vf;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       vf = &(pf->vf[vf_id]);
+
+       if (enable == vf->spoofchk)
+               goto out;
+
+       vf->spoofchk = enable;
+       memset(&ctxt, 0, sizeof(ctxt));
+       ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
+       ctxt.pf_num = pf->hw.pf_id;
+       ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+       if (enable)
+               ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+                                       I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+       ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+       if (ret) {
+               dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
+                       ret);
+               ret = -EIO;
+       }
+out:
+       return ret;
+}
+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
+
+#endif /* IFLA_VF_MAX */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/i40e_virtchnl_pf.h
new file mode 100644 (file)
index 0000000..85a5131
--- /dev/null
@@ -0,0 +1,158 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_PF_H_
+#define _I40E_VIRTCHNL_PF_H_
+
+#include "i40e.h"
+
+#define I40E_MAX_VLANID 4095
+
+#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
+
+#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED    3
+#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED  10
+
+#define I40E_VLAN_PRIORITY_SHIFT       12
+#define I40E_VLAN_MASK                 0xFFF
+#define I40E_PRIORITY_MASK             0x7000
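+
+/* Decoding sketch with an illustrative pvid: 0x3064 splits into
+ * vlan = 0x3064 & I40E_VLAN_MASK = 100 and
+ * qos  = (0x3064 & I40E_PRIORITY_MASK) >> I40E_VLAN_PRIORITY_SHIFT = 3.
+ */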
+
+#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
+#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
+
+/* Various queue ctrls */
+enum i40e_queue_ctrl {
+       I40E_QUEUE_CTRL_UNKNOWN = 0,
+       I40E_QUEUE_CTRL_ENABLE,
+       I40E_QUEUE_CTRL_ENABLECHECK,
+       I40E_QUEUE_CTRL_DISABLE,
+       I40E_QUEUE_CTRL_DISABLECHECK,
+       I40E_QUEUE_CTRL_FASTDISABLE,
+       I40E_QUEUE_CTRL_FASTDISABLECHECK,
+};
+
+/* VF states */
+enum i40e_vf_states {
+       I40E_VF_STAT_INIT = 0,
+       I40E_VF_STAT_ACTIVE,
+       I40E_VF_STAT_FCOEENA,
+       I40E_VF_STAT_DISABLED,
+};
+
+/* VF capabilities */
+enum i40e_vf_capabilities {
+       I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+       I40E_VIRTCHNL_VF_CAP_L2,
+#ifdef I40E_FCOE
+       I40E_VIRTCHNL_VF_CAP_FCOE,
+#endif
+};
+
+/* VF information structure */
+struct i40e_vf {
+       struct i40e_pf *pf;
+
+       /* VF id in the PF space */
+       u16 vf_id;
+       /* all VF vsis connect to the same parent */
+       enum i40e_switch_element_types parent_type;
+       struct i40e_virtchnl_version_info vf_ver;
+       u32 driver_caps; /* reported by VF driver */
+
+       /* VF Port Extender (PE) stag if used */
+       u16 stag;
+
+       struct i40e_virtchnl_ether_addr default_lan_addr;
+       struct i40e_virtchnl_ether_addr default_fcoe_addr;
+       u16 port_vlan_id;
+       bool pf_set_mac;        /* The VMM admin set the VF MAC address */
+
+       /* VSI indices - actual VSI pointers are maintained in the PF structure
+        * When assigned, these will be non-zero, because VSI 0 is always
+        * the main LAN VSI for the PF.
+        */
+       u8 lan_vsi_idx;         /* index into PF struct */
+       u8 lan_vsi_id;          /* ID as used by firmware */
+#ifdef I40E_FCOE
+       u8 fcoe_vsi_index;
+       u8 fcoe_vsi_id;
+#endif
+
+       u8 num_queue_pairs;     /* num of qps assigned to VF vsis */
+       u64 num_mdd_events;     /* num of mdd events detected */
+       /* num of continuous malformed or invalid msgs detected */
+       u64 num_invalid_msgs;
+       u64 num_valid_msgs;     /* num of valid msgs detected */
+
+       unsigned long vf_caps;  /* vf's adv. capabilities */
+       unsigned long vf_states;        /* vf's runtime states */
+       unsigned int tx_rate;   /* tx bandwidth limit in Mbps */
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+       bool link_forced;
+       bool link_up;           /* only valid if VF link is forced */
+#endif
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+       bool spoofchk;
+#endif
+};
+
+void i40e_free_vfs(struct i40e_pf *pf);
+#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)
+int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+#endif
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+                          u32 v_retval, u8 *msg, u16 msglen);
+int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+void i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
+
+/* VF configuration related iplink handlers */
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+                             int vf_id, u16 vlan_id, u8 qos);
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+                      int max_tx_rate);
+#else
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+#endif
+int i40e_ndo_enable_vf(struct net_device *netdev, int vf_id, bool enable);
+#ifdef IFLA_VF_MAX
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+                          int vf_id, struct ifla_vf_info *ivi);
+#ifdef HAVE_NDO_SET_VF_LINK_STATE
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+#endif
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
+#endif
+#endif
+
+void i40e_vc_notify_link_state(struct i40e_pf *pf);
+void i40e_vc_notify_reset(struct i40e_pf *pf);
+
+#endif /* _I40E_VIRTCHNL_PF_H_ */
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/kcompat.c b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/kcompat.c
new file mode 100644 (file)
index 0000000..f51540d
--- /dev/null
@@ -0,0 +1,2146 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+#include "kcompat.h"
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+/* From lib/vsprintf.c */
+#include <asm/div64.h>
+
+static int skip_atoi(const char **s)
+{
+       int i = 0;
+
+       while (isdigit(**s))
+               i = i*10 + *((*s)++) - '0';
+       return i;
+}
+
+#define _kc_ZEROPAD    1               /* pad with zero */
+#define _kc_SIGN       2               /* unsigned/signed long */
+#define _kc_PLUS       4               /* show plus */
+#define _kc_SPACE      8               /* space if plus */
+#define _kc_LEFT       16              /* left justified */
+#define _kc_SPECIAL    32              /* 0x */
+#define _kc_LARGE      64              /* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
+{
+       char c,sign,tmp[66];
+       const char *digits;
+       const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+       const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+       int i;
+
+       digits = (type & _kc_LARGE) ? large_digits : small_digits;
+       if (type & _kc_LEFT)
+               type &= ~_kc_ZEROPAD;
+       if (base < 2 || base > 36)
+               return NULL;
+       c = (type & _kc_ZEROPAD) ? '0' : ' ';
+       sign = 0;
+       if (type & _kc_SIGN) {
+               if (num < 0) {
+                       sign = '-';
+                       num = -num;
+                       size--;
+               } else if (type & _kc_PLUS) {
+                       sign = '+';
+                       size--;
+               } else if (type & _kc_SPACE) {
+                       sign = ' ';
+                       size--;
+               }
+       }
+       if (type & _kc_SPECIAL) {
+               if (base == 16)
+                       size -= 2;
+               else if (base == 8)
+                       size--;
+       }
+       i = 0;
+       if (num == 0)
+               tmp[i++]='0';
+       else while (num != 0)
+               tmp[i++] = digits[do_div(num,base)];
+       if (i > precision)
+               precision = i;
+       size -= precision;
+       if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
+               while(size-->0) {
+                       if (buf <= end)
+                               *buf = ' ';
+                       ++buf;
+               }
+       }
+       if (sign) {
+               if (buf <= end)
+                       *buf = sign;
+               ++buf;
+       }
+       if (type & _kc_SPECIAL) {
+               if (base==8) {
+                       if (buf <= end)
+                               *buf = '0';
+                       ++buf;
+               } else if (base==16) {
+                       if (buf <= end)
+                               *buf = '0';
+                       ++buf;
+                       if (buf <= end)
+                               *buf = digits[33];
+                       ++buf;
+               }
+       }
+       if (!(type & _kc_LEFT)) {
+               while (size-- > 0) {
+                       if (buf <= end)
+                               *buf = c;
+                       ++buf;
+               }
+       }
+       while (i < precision--) {
+               if (buf <= end)
+                       *buf = '0';
+               ++buf;
+       }
+       while (i-- > 0) {
+               if (buf <= end)
+                       *buf = tmp[i];
+               ++buf;
+       }
+       while (size-- > 0) {
+               if (buf <= end)
+                       *buf = ' ';
+               ++buf;
+       }
+       return buf;
+}
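+
+/* Example of number()'s output, with buffer handling elided: formatting
+ * 255 in base 16 with the _kc_SPECIAL flag and a field width of 6 yields
+ * "  0xff" -- two bytes go to the "0x" prefix and the remainder is
+ * space-padded because neither _kc_ZEROPAD nor _kc_LEFT is set.
+ */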
+
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+       int len;
+       unsigned long long num;
+       int i, base;
+       char *str, *end, c;
+       const char *s;
+
+       int flags;              /* flags to number() */
+
+       int field_width;        /* width of output field */
+       int precision;          /* min. # of digits for integers; max
+                                  number of chars from string */
+       int qualifier;          /* 'h', 'l', or 'L' for integer fields */
+                               /* 'z' support added 23/7/1999 S.H.    */
+                               /* 'z' changed to 'Z' --davidm 1/25/99 */
+
+       str = buf;
+       end = buf + size - 1;
+
+       if (end < buf - 1) {
+               end = ((void *) -1);
+               size = end - buf + 1;
+       }
+
+       for (; *fmt ; ++fmt) {
+               if (*fmt != '%') {
+                       if (str <= end)
+                               *str = *fmt;
+                       ++str;
+                       continue;
+               }
+
+               /* process flags */
+               flags = 0;
+               repeat:
+                       ++fmt;          /* this also skips first '%' */
+                       switch (*fmt) {
+                               case '-': flags |= _kc_LEFT; goto repeat;
+                               case '+': flags |= _kc_PLUS; goto repeat;
+                               case ' ': flags |= _kc_SPACE; goto repeat;
+                               case '#': flags |= _kc_SPECIAL; goto repeat;
+                               case '0': flags |= _kc_ZEROPAD; goto repeat;
+                       }
+
+               /* get field width */
+               field_width = -1;
+               if (isdigit(*fmt))
+                       field_width = skip_atoi(&fmt);
+               else if (*fmt == '*') {
+                       ++fmt;
+                       /* it's the next argument */
+                       field_width = va_arg(args, int);
+                       if (field_width < 0) {
+                               field_width = -field_width;
+                               flags |= _kc_LEFT;
+                       }
+               }
+
+               /* get the precision */
+               precision = -1;
+               if (*fmt == '.') {
+                       ++fmt;
+                       if (isdigit(*fmt))
+                               precision = skip_atoi(&fmt);
+                       else if (*fmt == '*') {
+                               ++fmt;
+                               /* it's the next argument */
+                               precision = va_arg(args, int);
+                       }
+                       if (precision < 0)
+                               precision = 0;
+               }
+
+               /* get the conversion qualifier */
+               qualifier = -1;
+               if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
+                       qualifier = *fmt;
+                       ++fmt;
+               }
+
+               /* default base */
+               base = 10;
+
+               switch (*fmt) {
+                       case 'c':
+                               if (!(flags & _kc_LEFT)) {
+                                       while (--field_width > 0) {
+                                               if (str <= end)
+                                                       *str = ' ';
+                                               ++str;
+                                       }
+                               }
+                               c = (unsigned char) va_arg(args, int);
+                               if (str <= end)
+                                       *str = c;
+                               ++str;
+                               while (--field_width > 0) {
+                                       if (str <= end)
+                                               *str = ' ';
+                                       ++str;
+                               }
+                               continue;
+
+                       case 's':
+                               s = va_arg(args, char *);
+                               if (!s)
+                                       s = "<NULL>";
+
+                               len = strnlen(s, precision);
+
+                               if (!(flags & _kc_LEFT)) {
+                                       while (len < field_width--) {
+                                               if (str <= end)
+                                                       *str = ' ';
+                                               ++str;
+                                       }
+                               }
+                               for (i = 0; i < len; ++i) {
+                                       if (str <= end)
+                                               *str = *s;
+                                       ++str; ++s;
+                               }
+                               while (len < field_width--) {
+                                       if (str <= end)
+                                               *str = ' ';
+                                       ++str;
+                               }
+                               continue;
+
+                       case 'p':
+                               if (field_width == -1) {
+                                       field_width = 2*sizeof(void *);
+                                       flags |= _kc_ZEROPAD;
+                               }
+                               str = number(str, end,
+                                               (unsigned long) va_arg(args, void *),
+                                               16, field_width, precision, flags);
+                               continue;
+
+
+                       case 'n':
+                       /* FIXME:
+                        * What does C99 say about the overflow case here? */
+                               if (qualifier == 'l') {
+                                       long * ip = va_arg(args, long *);
+                                       *ip = (str - buf);
+                               } else if (qualifier == 'Z') {
+                                       size_t * ip = va_arg(args, size_t *);
+                                       *ip = (str - buf);
+                               } else {
+                                       int * ip = va_arg(args, int *);
+                                       *ip = (str - buf);
+                               }
+                               continue;
+
+                       case '%':
+                               if (str <= end)
+                                       *str = '%';
+                               ++str;
+                               continue;
+
+                               /* integer number formats - set up the flags and "break" */
+                       case 'o':
+                               base = 8;
+                               break;
+
+                       case 'X':
+                               flags |= _kc_LARGE;
+                       case 'x':
+                               base = 16;
+                               break;
+
+                       case 'd':
+                       case 'i':
+                               flags |= _kc_SIGN;
+                       case 'u':
+                               break;
+
+                       default:
+                               if (str <= end)
+                                       *str = '%';
+                               ++str;
+                               if (*fmt) {
+                                       if (str <= end)
+                                               *str = *fmt;
+                                       ++str;
+                               } else {
+                                       --fmt;
+                               }
+                               continue;
+               }
+               if (qualifier == 'L')
+                       num = va_arg(args, long long);
+               else if (qualifier == 'l') {
+                       num = va_arg(args, unsigned long);
+                       if (flags & _kc_SIGN)
+                               num = (signed long) num;
+               } else if (qualifier == 'Z') {
+                       num = va_arg(args, size_t);
+               } else if (qualifier == 'h') {
+                       num = (unsigned short) va_arg(args, int);
+                       if (flags & _kc_SIGN)
+                               num = (signed short) num;
+               } else {
+                       num = va_arg(args, unsigned int);
+                       if (flags & _kc_SIGN)
+                               num = (signed int) num;
+               }
+               str = number(str, end, num, base,
+                               field_width, precision, flags);
+       }
+       if (str <= end)
+               *str = '\0';
+       else if (size > 0)
+               /* don't write out a null byte if the buf size is zero */
+               *end = '\0';
+       /* the trailing null byte doesn't count towards the total
+        * ++str;
+        */
+       return str-buf;
+}
+
+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
+{
+       va_list args;
+       int i;
+
+       va_start(args, fmt);
+       i = _kc_vsnprintf(buf, size, fmt, args);
+       va_end(args);
+       return i;
+}
+#endif /* < 2.4.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
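+/* Backport of pci_map_page() for pre-2.4.13 kernels.  With CONFIG_HIGHMEM
+ * the bus address is computed directly from the page's index in mem_map;
+ * otherwise the page has a kernel virtual address and pci_map_single()
+ * can be used instead. */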
+#if defined(CONFIG_HIGHMEM)
+
+#ifndef PCI_DRAM_OFFSET
+#define PCI_DRAM_OFFSET 0
+#endif
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+                 size_t size, int direction)
+{
+       return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
+               PCI_DRAM_OFFSET);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+                 size_t size, int direction)
+{
+       return pci_map_single(dev, (void *)page_address(page) + offset, size,
+                             direction);
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+void
+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+                   int direction)
+{
+       return pci_unmap_single(dev, dma_addr, size, direction);
+}
+
+#endif /* 2.4.13 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+int
+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+       if (!pci_dma_supported(dev, mask))
+               return -EIO;
+       dev->dma_mask = mask;
+       return 0;
+}
+
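+/* Backport of pci_request_regions(): claim every populated BAR (I/O or
+ * memory) under res_name, releasing everything already claimed if any
+ * single request fails. */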
+int
+_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
+{
+       int i;
+
+       for (i = 0; i < 6; i++) {
+               if (pci_resource_len(dev, i) == 0)
+                       continue;
+
+               if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+                       if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+                               pci_release_regions(dev);
+                               return -EBUSY;
+                       }
+               } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+                       if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+                               pci_release_regions(dev);
+                               return -EBUSY;
+                       }
+               }
+       }
+       return 0;
+}
+
+void
+_kc_pci_release_regions(struct pci_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < 6; i++) {
+               if (pci_resource_len(dev, i) == 0)
+                       continue;
+
+               if (pci_resource_flags(dev, i) & IORESOURCE_IO)
+                       release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+
+               else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+                       release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+       }
+}
+
+/**************************************/
+/* NETWORK DRIVER API */
+
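+/* Backport of alloc_etherdev() for pre-2.4.3 kernels: allocate the
+ * net_device and private area in one zeroed block, with the private
+ * area aligned to a 32-byte boundary. */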
+struct net_device *
+_kc_alloc_etherdev(int sizeof_priv)
+{
+       struct net_device *dev;
+       int alloc_size;
+
+       alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
+       dev = kzalloc(alloc_size, GFP_KERNEL);
+       if (!dev)
+               return NULL;
+
+       if (sizeof_priv)
+               dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
+       dev->name[0] = '\0';
+       ether_setup(dev);
+
+       return dev;
+}
+
+int
+_kc_is_valid_ether_addr(u8 *addr)
+{
+       const char zaddr[6] = { 0, };
+
+       return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
+}
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+int
+_kc_pci_set_power_state(struct pci_dev *dev, int state)
+{
+       return 0;
+}
+
+int
+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+       return 0;
+}
+
+#endif /* 2.4.6 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+                            int off, int size)
+{
+       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+       frag->page = page;
+       frag->page_offset = off;
+       frag->size = size;
+       skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/*
+ * Original Copyright:
+ * find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+                            unsigned long offset)
+{
+       const unsigned long *p = addr + BITOP_WORD(offset);
+       unsigned long result = offset & ~(BITS_PER_LONG-1);
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset %= BITS_PER_LONG;
+       if (offset) {
+               tmp = *(p++);
+               tmp &= (~0UL << offset);
+               if (size < BITS_PER_LONG)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= BITS_PER_LONG;
+               result += BITS_PER_LONG;
+       }
+       while (size & ~(BITS_PER_LONG-1)) {
+               if ((tmp = *(p++)))
+                       goto found_middle;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+
+found_first:
+       tmp &= (~0UL >> (BITS_PER_LONG - size));
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size;   /* Nope. */
+found_middle:
+       return result + ffs(tmp);
+}
+
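+/* Backport of strlcpy(): copy at most size - 1 bytes and always
+ * NUL-terminate when size is non-zero.  Returns strlen(src), so a
+ * return value >= size indicates truncation. */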
+size_t _kc_strlcpy(char *dest, const char *src, size_t size)
+{
+       size_t ret = strlen(src);
+
+       if (size) {
+               size_t len = (ret >= size) ? size - 1 : ret;
+               memcpy(dest, src, len);
+               dest[len] = '\0';
+       }
+       return ret;
+}
+
+#ifndef do_div
+#if BITS_PER_LONG == 32
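+/* Fallback do_div() helper for 32-bit builds: shift-and-subtract long
+ * division that stores the quotient back through *n and returns the
+ * remainder; e.g. *n = 1000001, base = 1000 leaves *n == 1000 and
+ * returns 1. */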
+uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
+{
+       uint64_t rem = *n;
+       uint64_t b = base;
+       uint64_t res, d = 1;
+       uint32_t high = rem >> 32;
+
+       /* Reduce the thing a bit first */
+       res = 0;
+       if (high >= base) {
+               high /= base;
+               res = (uint64_t) high << 32;
+               rem -= (uint64_t) (high*base) << 32;
+       }
+
+       while ((int64_t)b > 0 && b < rem) {
+               b = b+b;
+               d = d+d;
+       }
+
+       do {
+               if (rem >= b) {
+                       rem -= b;
+                       res += d;
+               }
+               b >>= 1;
+               d >>= 1;
+       } while (d);
+
+       *n = res;
+       return rem;
+}
+#endif /* BITS_PER_LONG == 32 */
+#endif /* do_div */
+#endif /* 2.6.0 => 2.4.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
+{
+       va_list args;
+       int i;
+
+       va_start(args, fmt);
+       i = vsnprintf(buf, size, fmt, args);
+       va_end(args);
+       return (i >= size) ? (size - 1) : i;
+}
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+char *_kc_kstrdup(const char *s, unsigned int gfp)
+{
+       size_t len;
+       char *buf;
+
+       if (!s)
+               return NULL;
+
+       len = strlen(s) + 1;
+       buf = kmalloc(len, gfp);
+       if (buf)
+               memcpy(buf, s, len);
+       return buf;
+}
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+void *_kc_kzalloc(size_t size, int flags)
+{
+       void *ret = kmalloc(size, flags);
+       if (ret)
+               memset(ret, 0, size);
+       return ret;
+}
+#endif /* <= 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
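+/* Backport of skb_pad(): zero-pad the skb tail out to 'pad' bytes,
+ * expanding (and if necessary linearizing) the buffer first.  On
+ * allocation failure the skb is freed and -ENOMEM returned, matching
+ * the >= 2.6.19 semantics where the caller must not touch the skb. */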
+int _kc_skb_pad(struct sk_buff *skb, int pad)
+{
+       int ntail;
+
+       /* If the skbuff is non linear, tailroom is always zero. */
+       if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+               memset(skb->data + skb->len, 0, pad);
+               return 0;
+       }
+
+       ntail = skb->data_len + pad - (skb->end - skb->tail);
+       if (likely(skb_cloned(skb) || ntail > 0)) {
+               if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
+                       goto free_skb;
+       }
+
+#ifdef MAX_SKB_FRAGS
+       if (skb_is_nonlinear(skb) &&
+           !__pskb_pull_tail(skb, skb->data_len))
+               goto free_skb;
+
+#endif
+       memset(skb->data + skb->len, 0, pad);
+       return 0;
+
+free_skb:
+       kfree_skb(skb);
+       return -ENOMEM;
+}
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
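+/* Backport of pci_save_state() that snapshots the device's configuration
+ * space (a larger region when a PCIe capability is present) into a buffer
+ * hung off the adapter struct instead of pdev->saved_config_space. */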
+int _kc_pci_save_state(struct pci_dev *pdev)
+{
+       struct adapter_struct *adapter = pci_get_drvdata(pdev);
+       int size = PCI_CONFIG_SPACE_LEN, i;
+       u16 pcie_cap_offset, pcie_link_status;
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+       /* no ->dev for 2.4 kernels */
+       WARN_ON(pdev->dev.driver_data == NULL);
+#endif
+       pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+       if (pcie_cap_offset) {
+               if (!pci_read_config_word(pdev,
+                                         pcie_cap_offset + PCIE_LINK_STATUS,
+                                         &pcie_link_status))
+                       size = PCIE_CONFIG_SPACE_LEN;
+       }
+       pci_config_space_ich8lan();
+#ifdef HAVE_PCI_ERS
+       if (adapter->config_space == NULL)
+#else
+       WARN_ON(adapter->config_space != NULL);
+#endif
+               adapter->config_space = kmalloc(size, GFP_KERNEL);
+       if (!adapter->config_space) {
+               printk(KERN_ERR "Out of memory in pci_save_state\n");
+               return -ENOMEM;
+       }
+       for (i = 0; i < (size / 4); i++)
+               pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
+       return 0;
+}
+
+void _kc_pci_restore_state(struct pci_dev *pdev)
+{
+       struct adapter_struct *adapter = pci_get_drvdata(pdev);
+       int size = PCI_CONFIG_SPACE_LEN, i;
+       u16 pcie_cap_offset;
+       u16 pcie_link_status;
+
+       if (adapter->config_space != NULL) {
+               pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+               if (pcie_cap_offset &&
+                   !pci_read_config_word(pdev,
+                                         pcie_cap_offset + PCIE_LINK_STATUS,
+                                         &pcie_link_status))
+                       size = PCIE_CONFIG_SPACE_LEN;
+
+               pci_config_space_ich8lan();
+               for (i = 0; i < (size / 4); i++)
+                       pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
+#ifndef HAVE_PCI_ERS
+               kfree(adapter->config_space);
+               adapter->config_space = NULL;
+#endif
+       }
+}
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+void _kc_free_netdev(struct net_device *netdev)
+{
+       struct adapter_struct *adapter = netdev_priv(netdev);
+
+       if (adapter->config_space != NULL)
+               kfree(adapter->config_space);
+#ifdef CONFIG_SYSFS
+       if (netdev->reg_state == NETREG_UNINITIALIZED) {
+               kfree((char *)netdev - netdev->padded);
+       } else {
+               BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
+               netdev->reg_state = NETREG_RELEASED;
+               class_device_put(&netdev->class_dev);
+       }
+#else
+       kfree((char *)netdev - netdev->padded);
+#endif
+}
+#endif
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
+{
+       void *p;
+
+       p = kzalloc(len, gfp);
+       if (p)
+               memcpy(p, src, len);
+       return p;
+}
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
+{
+       return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
+}
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+/* hexdump code taken from lib/hexdump.c */
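+/* Provides print_hex_dump() for pre-2.6.22 kernels.  Typical use (the
+ * prefix string, buffer, and length below are illustrative):
+ *   print_hex_dump(KERN_DEBUG, "desc: ", DUMP_PREFIX_OFFSET,
+ *                  16, 1, ring->desc, len, true);
+ */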
+static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+                       int groupsize, unsigned char *linebuf,
+                       size_t linebuflen, bool ascii)
+{
+       const u8 *ptr = buf;
+       u8 ch;
+       int j, lx = 0;
+       int ascii_column;
+
+       if (rowsize != 16 && rowsize != 32)
+               rowsize = 16;
+
+       if (!len)
+               goto nil;
+       if (len > rowsize)              /* limit to one line at a time */
+               len = rowsize;
+       if ((len % groupsize) != 0)     /* no mixed size output */
+               groupsize = 1;
+
+       switch (groupsize) {
+       case 8: {
+               const u64 *ptr8 = buf;
+               int ngroups = len / groupsize;
+
+               for (j = 0; j < ngroups; j++)
+                       lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+                               "%s%16.16llx", j ? " " : "",
+                               (unsigned long long)*(ptr8 + j));
+               ascii_column = 17 * ngroups + 2;
+               break;
+       }
+
+       case 4: {
+               const u32 *ptr4 = buf;
+               int ngroups = len / groupsize;
+
+               for (j = 0; j < ngroups; j++)
+                       lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+                               "%s%8.8x", j ? " " : "", *(ptr4 + j));
+               ascii_column = 9 * ngroups + 2;
+               break;
+       }
+
+       case 2: {
+               const u16 *ptr2 = buf;
+               int ngroups = len / groupsize;
+
+               for (j = 0; j < ngroups; j++)
+                       lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+                               "%s%4.4x", j ? " " : "", *(ptr2 + j));
+               ascii_column = 5 * ngroups + 2;
+               break;
+       }
+
+       default:
+               for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
+                       ch = ptr[j];
+                       linebuf[lx++] = hex_asc(ch >> 4);
+                       linebuf[lx++] = hex_asc(ch & 0x0f);
+                       linebuf[lx++] = ' ';
+               }
+               if (j)
+                       lx--;
+
+               ascii_column = 3 * rowsize + 2;
+               break;
+       }
+       if (!ascii)
+               goto nil;
+
+       while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
+               linebuf[lx++] = ' ';
+       for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
+               linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
+                               : '.';
+nil:
+       linebuf[lx++] = '\0';
+}
+
+void _kc_print_hex_dump(const char *level,
+                       const char *prefix_str, int prefix_type,
+                       int rowsize, int groupsize,
+                       const void *buf, size_t len, bool ascii)
+{
+       const u8 *ptr = buf;
+       int i, linelen, remaining = len;
+       unsigned char linebuf[200];
+
+       if (rowsize != 16 && rowsize != 32)
+               rowsize = 16;
+
+       for (i = 0; i < len; i += rowsize) {
+               linelen = min(remaining, rowsize);
+               remaining -= rowsize;
+               _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+                               linebuf, sizeof(linebuf), ascii);
+
+               switch (prefix_type) {
+               case DUMP_PREFIX_ADDRESS:
+                       printk("%s%s%*p: %s\n", level, prefix_str,
+                               (int)(2 * sizeof(void *)), ptr + i, linebuf);
+                       break;
+               case DUMP_PREFIX_OFFSET:
+                       printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
+                       break;
+               default:
+                       printk("%s%s%s\n", level, prefix_str, linebuf);
+                       break;
+               }
+       }
+}
+
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifdef NAPI
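+/* Pre-2.6.24 kernels poll via netdev->poll(netdev, budget).  These shims
+ * let NAPI-style q_vector->napi.poll() handlers run on that interface:
+ * __kc_adapter_clean() forwards the old budget/quota accounting to the
+ * new-style poll routine stored in the fake netdev's priv pointer. */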
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
+{
+       struct adapter_q_vector *q_vector = container_of(napi,
+                                                       struct adapter_q_vector,
+                                                       napi);
+       return &q_vector->poll_dev;
+}
+
+int __kc_adapter_clean(struct net_device *netdev, int *budget)
+{
+       int work_done;
+       int work_to_do = min(*budget, netdev->quota);
+       /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
+       struct napi_struct *napi = netdev->priv;
+       work_done = napi->poll(napi, work_to_do);
+       *budget -= work_done;
+       netdev->quota -= work_done;
+       return (work_done >= work_to_do) ? 1 : 0;
+}
+#endif /* NAPI */
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
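+/* Backport of pci_disable_link_state(): clears the requested ASPM state
+ * bits in the upstream bridge's PCIe Link Control register. */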
+void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
+{
+       struct pci_dev *parent = pdev->bus->self;
+       u16 link_state;
+       int pos;
+
+       if (!parent)
+               return;
+
+       pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+       if (pos) {
+               pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
+               link_state &= ~state;
+               pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
+       }
+}
+#endif /* < 2.6.26 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
+{
+       struct adapter_struct *adapter = netdev_priv(netdev);
+       int i;
+
+       netif_stop_queue(netdev);
+       if (netif_is_multiqueue(netdev))
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       netif_stop_subqueue(netdev, i);
+}
+void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
+{
+       struct adapter_struct *adapter = netdev_priv(netdev);
+       int i;
+
+       netif_wake_queue(netdev);
+       if (netif_is_multiqueue(netdev))
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       netif_wake_subqueue(netdev, i);
+}
+void _kc_netif_tx_start_all_queues(struct net_device *netdev)
+{
+       struct adapter_struct *adapter = netdev_priv(netdev);
+       int i;
+
+       netif_start_queue(netdev);
+       if (netif_is_multiqueue(netdev))
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       netif_start_subqueue(netdev, i);
+}
+#endif /* HAVE_TX_MQ */
+
+void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
+{
+       va_list args;
+
+       printk(KERN_WARNING "------------[ cut here ]------------\n");
+       printk(KERN_WARNING "WARNING: at %s:%d \n", file, line);
+       va_start(args, fmt);
+       vprintk(fmt, args);
+       va_end(args);
+
+       dump_stack();
+}
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+
+int
+_kc_pci_prepare_to_sleep(struct pci_dev *dev)
+{
+       pci_power_t target_state;
+       int error;
+
+       target_state = pci_choose_state(dev, PMSG_SUSPEND);
+
+       pci_enable_wake(dev, target_state, true);
+
+       error = pci_set_power_state(dev, target_state);
+
+       if (error)
+               pci_enable_wake(dev, target_state, false);
+
+       return error;
+}
+
+int
+_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+       int err;
+
+       err = pci_enable_wake(dev, PCI_D3cold, enable);
+       if (err)
+               goto out;
+
+       err = pci_enable_wake(dev, PCI_D3hot, enable);
+
+out:
+       return err;
+}
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
+{
+       u16 old_cmd, cmd;
+
+       pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
+       if (enable)
+               cmd = old_cmd | PCI_COMMAND_MASTER;
+       else
+               cmd = old_cmd & ~PCI_COMMAND_MASTER;
+       if (cmd != old_cmd) {
+               dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
+                       enable ? "enabling" : "disabling");
+               pci_write_config_word(pdev, PCI_COMMAND, cmd);
+       }
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
+       pdev->is_busmaster = enable;
+#endif
+}
+
+void _kc_pci_clear_master(struct pci_dev *dev)
+{
+       __kc_pci_set_master(dev, false);
+}
+#endif /* < 2.6.29 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev)
+{
+       int num_vf = 0;
+#ifdef CONFIG_PCI_IOV
+       struct pci_dev *vfdev;
+
+       /* loop through all ethernet devices starting at PF dev */
+       vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
+       while (vfdev) {
+               if (vfdev->is_virtfn && vfdev->physfn == dev)
+                       num_vf++;
+
+               vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
+       }
+
+#endif
+       return num_vf;
+}
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+#ifdef HAVE_TX_MQ
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+       unsigned int real_num = dev->real_num_tx_queues;
+       struct Qdisc *qdisc;
+       int i;
+
+       if (unlikely(txq > dev->num_tx_queues))
+               return;
+
+       if (txq > real_num)
+               dev->real_num_tx_queues = txq;
+       else if (txq < real_num) {
+               dev->real_num_tx_queues = txq;
+               for (i = txq; i < dev->num_tx_queues; i++) {
+                       qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+                       if (qdisc) {
+                               spin_lock_bh(qdisc_lock(qdisc));
+                               qdisc_reset(qdisc);
+                               spin_unlock_bh(qdisc_lock(qdisc));
+                       }
+               }
+       }
+}
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#endif /* HAVE_TX_MQ */
+
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+                                  const void __user *from, size_t count)
+{
+       loff_t pos = *ppos;
+       size_t res;
+
+       if (pos < 0)
+               return -EINVAL;
+       if (pos >= available || !count)
+               return 0;
+       if (count > available - pos)
+               count = available - pos;
+       res = copy_from_user(to + pos, from, count);
+       if (res == count)
+               return -EFAULT;
+       count -= res;
+       *ppos = pos + count;
+       return count;
+}
+
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+static const u32 _kc_flags_dup_features =
+       (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
+
+u32 _kc_ethtool_op_get_flags(struct net_device *dev)
+{
+       return dev->features & _kc_flags_dup_features;
+}
+
+int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
+{
+       if (data & ~supported)
+               return -EINVAL;
+
+       dev->features = ((dev->features & ~_kc_flags_dup_features) |
+                        (data & _kc_flags_dup_features));
+       return 0;
+}
+#endif /* < 2.6.36 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#include <net/ip.h>
+#include <linux/pkt_sched.h>
+
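+/* Backport of the core skb_tx_hash() logic: reuse the recorded Rx queue
+ * when present, otherwise hash the socket (or protocol/rxhash) with
+ * jhash and scale into the queue range with a 32-bit multiply-shift,
+ * i.e. queue = ((u64)hash * qcount) >> 32. */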
+u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb,
+                     u16 num_tx_queues)
+{
+       u32 hash;
+       u16 qoffset = 0;
+       u16 qcount = num_tx_queues;
+
+       if (skb_rx_queue_recorded(skb)) {
+               hash = skb_get_rx_queue(skb);
+               while (unlikely(hash >= num_tx_queues))
+                       hash -= num_tx_queues;
+               return hash;
+       }
+
+       if (skb->sk && skb->sk->sk_hash)
+               hash = skb->sk->sk_hash;
+       else
+#ifdef NETIF_F_RXHASH
+               hash = (__force u16) skb->protocol ^ skb->rxhash;
+#else
+               hash = skb->protocol;
+#endif
+
+       hash = jhash_1word(hash, _kc_hashrnd);
+
+       return (u16) (((u64) hash * qcount) >> 32) + qoffset;
+}
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+
+u8 _kc_netdev_get_num_tc(struct net_device *dev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       if (pf->flags & I40E_FLAG_DCB_ENABLED)
+               return vsi->tc_config.numtc;
+
+       return 0;
+}
+
+int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+
+       if (num_tc > I40E_MAX_TRAFFIC_CLASS)
+               return -EINVAL;
+
+       vsi->tc_config.numtc = num_tc;
+
+       return 0;
+}
+
+u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+       return dcbcfg->etscfg.prioritytable[up];
+}
+
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#endif /* < 2.6.39 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+                        int off, int size, unsigned int truesize)
+{
+       skb_fill_page_desc(skb, i, page, off, size);
+       skb->len += size;
+       skb->data_len += size;
+       skb->truesize += truesize;
+}
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+int _kc_simple_open(struct inode *inode, struct file *file)
+{
+       if (inode->i_private)
+               file->private_data = inode->i_private;
+
+       return 0;
+}
+#endif /* SLE_VERSION < 11,3,0 */
+
+#endif /* < 3.4.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
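+/* Backports of the 3.7 pcie_capability_*() accessors.  The helpers below
+ * work out, per the PCIe spec, which registers a device's capability
+ * version and port type actually implement, so reads of unimplemented
+ * registers can return the spec-defined defaults instead of garbage. */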
+static inline int __kc_pcie_cap_version(struct pci_dev *dev)
+{
+       int pos;
+       u16 reg16;
+
+       pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+       if (!pos)
+               return 0;
+       pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+       return reg16 & PCI_EXP_FLAGS_VERS;
+}
+
+static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
+{
+       return true;
+}
+
+static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
+{
+       int type = pci_pcie_type(dev);
+
+       return __kc_pcie_cap_version(dev) > 1 ||
+              type == PCI_EXP_TYPE_ROOT_PORT ||
+              type == PCI_EXP_TYPE_ENDPOINT ||
+              type == PCI_EXP_TYPE_LEG_END;
+}
+
+static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
+{
+       int type = pci_pcie_type(dev);
+       int pos;
+       u16 pcie_flags_reg;
+
+       pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+       if (!pos)
+               return false;
+       pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
+
+       return __kc_pcie_cap_version(dev) > 1 ||
+              type == PCI_EXP_TYPE_ROOT_PORT ||
+              (type == PCI_EXP_TYPE_DOWNSTREAM &&
+               pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
+}
+
+static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
+{
+       int type = pci_pcie_type(dev);
+
+       return __kc_pcie_cap_version(dev) > 1 ||
+              type == PCI_EXP_TYPE_ROOT_PORT ||
+              type == PCI_EXP_TYPE_RC_EC;
+}
+
+static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
+{
+       if (!pci_is_pcie(dev))
+               return false;
+
+       switch (pos) {
+       case PCI_EXP_FLAGS_TYPE:
+               return true;
+       case PCI_EXP_DEVCAP:
+       case PCI_EXP_DEVCTL:
+       case PCI_EXP_DEVSTA:
+               return __kc_pcie_cap_has_devctl(dev);
+       case PCI_EXP_LNKCAP:
+       case PCI_EXP_LNKCTL:
+       case PCI_EXP_LNKSTA:
+               return __kc_pcie_cap_has_lnkctl(dev);
+       case PCI_EXP_SLTCAP:
+       case PCI_EXP_SLTCTL:
+       case PCI_EXP_SLTSTA:
+               return __kc_pcie_cap_has_sltctl(dev);
+       case PCI_EXP_RTCTL:
+       case PCI_EXP_RTCAP:
+       case PCI_EXP_RTSTA:
+               return __kc_pcie_cap_has_rtctl(dev);
+       case PCI_EXP_DEVCAP2:
+       case PCI_EXP_DEVCTL2:
+       case PCI_EXP_LNKCAP2:
+       case PCI_EXP_LNKCTL2:
+       case PCI_EXP_LNKSTA2:
+               return __kc_pcie_cap_version(dev) > 1;
+       default:
+               return false;
+       }
+}
+
+/*
+ * Note that these accessor functions are only for the "PCI Express
+ * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
+ */
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
+{
+       int ret;
+
+       *val = 0;
+       if (pos & 1)
+               return -EINVAL;
+
+       if (__kc_pcie_capability_reg_implemented(dev, pos)) {
+               ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
+               /*
+                * Reset *val to 0 if pci_read_config_word() fails, it may
+                * have been written as 0xFFFF if hardware error happens
+                * during pci_read_config_word().
+                */
+               if (ret)
+                       *val = 0;
+               return ret;
+       }
+
+       /*
+        * For Functions that do not implement the Slot Capabilities,
+        * Slot Status, and Slot Control registers, these spaces must
+        * be hardwired to 0b, with the exception of the Presence Detect
+        * State bit in the Slot Status register of Downstream Ports,
+        * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
+        */
+       if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+           pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+               *val = PCI_EXP_SLTSTA_PDS;
+       }
+
+       return 0;
+}
+
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
+{
+       if (pos & 1)
+               return -EINVAL;
+
+       if (!__kc_pcie_capability_reg_implemented(dev, pos))
+               return 0;
+
+       return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
+}
+
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+                                           u16 clear, u16 set)
+{
+       int ret;
+       u16 val;
+
+       ret = __kc_pcie_capability_read_word(dev, pos, &val);
+       if (!ret) {
+               val &= ~clear;
+               val |= set;
+               ret = __kc_pcie_capability_write_word(dev, pos, val);
+       }
+
+       return ret;
+}
+
+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
+                                            u16 clear)
+{
+       return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
+}
+#endif /* < 3.7.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
+#ifdef CONFIG_XPS
+#if NR_CPUS < 64
+#define _KC_MAX_XPS_CPUS       NR_CPUS
+#else
+#define _KC_MAX_XPS_CPUS       64
+#endif
+
+/*
+ * netdev_queue sysfs structures and functions.
+ */
+struct _kc_netdev_queue_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct netdev_queue *queue,
+           struct _kc_netdev_queue_attribute *attr, char *buf);
+       ssize_t (*store)(struct netdev_queue *queue,
+           struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len);
+};
+
+#define to_kc_netdev_queue_attr(_attr) container_of(_attr,             \
+    struct _kc_netdev_queue_attribute, attr)
+
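+/* Backport of netif_set_xps_queue().  Older kernels expose XPS only
+ * through sysfs, so this formats the cpumask the way the xps_cpus
+ * attribute expects and pushes it through that attribute's own store()
+ * handler. */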
+int __kc_netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
+                            u16 index)
+{
+       struct netdev_queue *txq = netdev_get_tx_queue(dev, index);
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+       /* Redhat requires some odd extended netdev structures */
+       struct netdev_tx_queue_extended *txq_ext =
+                                       netdev_extended(dev)->_tx_ext + index;
+       struct kobj_type *ktype = txq_ext->kobj.ktype;
+#else
+       struct kobj_type *ktype = txq->kobj.ktype;
+#endif
+       struct _kc_netdev_queue_attribute *xps_attr;
+       struct attribute *attr = NULL;
+       int i, len, err;
+#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9)
+       char buf[_KC_XPS_BUFLEN];
+
+       if (!ktype)
+               return -ENOMEM;
+
+       /* attempt to locate the XPS attribute in the Tx queue */
+       for (i = 0; (attr = ktype->default_attrs[i]); i++) {
+               if (!strcmp("xps_cpus", attr->name))
+                       break;
+       }
+
+       /* if we did not find it return an error */
+       if (!attr)
+               return -EINVAL;
+
+       /* copy the mask into a string */
+       len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN,
+                              cpumask_bits(mask), _KC_MAX_XPS_CPUS);
+       if (!len)
+               return -ENOMEM;
+
+       xps_attr = to_kc_netdev_queue_attr(attr);
+
+       /* Store the XPS value using the SYSFS store call */
+       err = xps_attr->store(txq, xps_attr, buf, len);
+
+       /* we only had an error on err < 0 */
+       return (err < 0) ? err : 0;
+}
+#endif /* CONFIG_XPS */
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+       struct xps_dev_maps *dev_maps;
+       struct xps_map *map;
+       int queue_index = -1;
+
+       rcu_read_lock();
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+       /* Redhat requires some odd extended netdev structures */
+       dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps);
+#else
+       dev_maps = rcu_dereference(dev->xps_maps);
+#endif
+       if (dev_maps) {
+               map = rcu_dereference(
+                   dev_maps->cpu_map[raw_smp_processor_id()]);
+               if (map) {
+                       if (map->len == 1)
+                               queue_index = map->queues[0];
+                       else {
+                               u32 hash;
+                               if (skb->sk && skb->sk->sk_hash)
+                                       hash = skb->sk->sk_hash;
+                               else
+                                       hash = (__force u16) skb->protocol ^
+                                           skb->rxhash;
+                               hash = jhash_1word(hash, _kc_hashrnd);
+                               queue_index = map->queues[
+                                   ((u64)hash * map->len) >> 32];
+                       }
+                       if (unlikely(queue_index >= dev->real_num_tx_queues))
+                               queue_index = -1;
+               }
+       }
+       rcu_read_unlock();
+
+       return queue_index;
+#else
+       return -1;
+#endif
+}
+
+u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+       struct sock *sk = skb->sk;
+       int queue_index = sk_tx_queue_get(sk);
+       int new_index;
+
+       if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) {
+#ifdef CONFIG_XPS
+               if (!skb->ooo_okay)
+#endif
+                       return queue_index;
+       }
+
+       new_index = kc_get_xps_queue(dev, skb);
+       if (new_index < 0)
+               new_index = skb_tx_hash(dev, skb);
+
+       if (queue_index != new_index && sk) {
+               struct dst_entry *dst =
+                           rcu_dereference(sk->sk_dst_cache);
+
+               if (dst && skb_dst(skb) == dst)
+                       sk_tx_queue_set(sk, new_index);
+
+       }
+
+       return new_index;
+}
+
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#endif /* < 3.9.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                         struct net_device *dev, const unsigned char *addr,
+                         u16 flags)
+#else
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+                         unsigned char *addr, u16 flags)
+#endif
+{
+       int err = -EINVAL;
+
+       /* If aging addresses are supported device will need to
+        * implement its own handler for this.
+        */
+       if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+               pr_info("%s: FDB only supports static addresses\n", dev->name);
+               return err;
+       }
+
+       if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+               err = dev_uc_add_excl(dev, addr);
+       else if (is_multicast_ether_addr(addr))
+               err = dev_mc_add_excl(dev, addr);
+
+       /* Only return duplicate errors if NLM_F_EXCL is set */
+       if (err == -EEXIST && !(flags & NLM_F_EXCL))
+               err = 0;
+
+       return err;
+}
+
+#ifdef USE_CONST_DEV_UC_CHAR
+#ifdef HAVE_FDB_DEL_NLATTR
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+                         struct net_device *dev, const unsigned char *addr)
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+                         const unsigned char *addr)
+#endif
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+                         unsigned char *addr)
+#endif
+{
+       int err = -EINVAL;
+
+       /* If aging addresses are supported device will need to
+        * implement its own handler for this.
+        */
+       if (!(ndm->ndm_state & NUD_PERMANENT)) {
+               pr_info("%s: FDB only supports static addresses\n", dev->name);
+               return err;
+       }
+
+       if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+               err = dev_uc_del(dev, addr);
+       else if (is_multicast_ether_addr(addr))
+               err = dev_mc_del(dev, addr);
+
+       return err;
+}
+
+#endif /* HAVE_FDB_OPS */
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev)
+{
+       unsigned int vfs_assigned = 0;
+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
+       int pos;
+       struct pci_dev *vfdev;
+       unsigned short dev_id;
+
+       /* only search if we are a PF */
+       if (!dev->is_physfn)
+               return 0;
+
+       /* find SR-IOV capability */
+       pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+       if (!pos)
+               return 0;
+
+       /*
+        * determine the device ID for the VFs, the vendor ID will be the
+        * same as the PF so there is no need to check for that one
+        */
+       pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
+
+       /* loop through all the VFs to see if we own any that are assigned */
+       vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+       while (vfdev) {
+               /*
+                * It is considered assigned if it is a virtual function with
+                * our dev as the physical function and the assigned bit is set
+                */
+               if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+                   (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+                       vfs_assigned++;
+
+               vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
+       }
+
+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
+       return vfs_assigned;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* < 3.10.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+const unsigned char pcie_link_speed[] = {
+       PCI_SPEED_UNKNOWN,      /* 0 */
+       PCIE_SPEED_2_5GT,       /* 1 */
+       PCIE_SPEED_5_0GT,       /* 2 */
+       PCIE_SPEED_8_0GT,       /* 3 */
+       PCI_SPEED_UNKNOWN,      /* 4 */
+       PCI_SPEED_UNKNOWN,      /* 5 */
+       PCI_SPEED_UNKNOWN,      /* 6 */
+       PCI_SPEED_UNKNOWN,      /* 7 */
+       PCI_SPEED_UNKNOWN,      /* 8 */
+       PCI_SPEED_UNKNOWN,      /* 9 */
+       PCI_SPEED_UNKNOWN,      /* A */
+       PCI_SPEED_UNKNOWN,      /* B */
+       PCI_SPEED_UNKNOWN,      /* C */
+       PCI_SPEED_UNKNOWN,      /* D */
+       PCI_SPEED_UNKNOWN,      /* E */
+       PCI_SPEED_UNKNOWN       /* F */
+};
+
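+/* Backport of pcie_get_minimum_link(): walk from the device up to the
+ * root, recording the slowest link speed and narrowest width seen, i.e.
+ * the effective bandwidth bottleneck of the whole path. */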
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+                              enum pcie_link_width *width)
+{
+       int ret;
+
+       *speed = PCI_SPEED_UNKNOWN;
+       *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+       while (dev) {
+               u16 lnksta;
+               enum pci_bus_speed next_speed;
+               enum pcie_link_width next_width;
+
+               ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+               if (ret)
+                       return ret;
+
+               next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+               next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+                       PCI_EXP_LNKSTA_NLW_SHIFT;
+
+               if (next_speed < *speed)
+                       *speed = next_speed;
+
+               if (next_width < *width)
+                       *width = next_width;
+
+               dev = dev->bus->self;
+       }
+
+       return 0;
+}
+
+#endif /* < 3.12.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
+int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+       int err = dma_set_mask(dev, mask);
+
+       if (!err)
+               /* a coherent mask for the same size will always succeed if
+                * dma_set_mask does.  However we store the error anyway, since
+                * some kernels use gcc's warn_unused_result on their
+                * definition of dma_set_coherent_mask.
+                */
+               err = dma_set_coherent_mask(dev, mask);
+       return err;
+}
+
+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
+{
+       /* Set of random keys generated using kernel random number generator */
+       static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62,
+                               0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F,
+                               0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95,
+                               0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC,
+                               0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41,
+                               0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A,
+                               0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20};
+
+       BUG_ON(len > NETDEV_RSS_KEY_LEN);
+       memcpy(buffer, seed, len);
+}
+#endif /* < 3.13.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
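+/* Backport of pci_enable_msix_range().  pci_enable_msix() returns 0 on
+ * success, a negative errno on failure, or the number of vectors that
+ * could be allocated; the loop retries with that smaller count until it
+ * succeeds or drops below minvec. */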
+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+                              int minvec, int maxvec)
+{
+       int nvec = maxvec;
+       int rc;
+
+       if (maxvec < minvec)
+               return -ERANGE;
+
+       do {
+               rc = pci_enable_msix(dev, entries, nvec);
+               if (rc < 0) {
+                       return rc;
+               } else if (rc > 0) {
+                       if (rc < minvec)
+                               return -ENOSPC;
+                       nvec = rc;
+               }
+       } while (rc);
+
+       return nvec;
+}
+#endif /* < 3.14.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
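+/* Backports of the 3.16 __hw_addr_sync_dev()/__hw_addr_unsync_dev()
+ * helpers: one pass drops stale single-reference entries via unsync(),
+ * a second pass calls sync() for entries not yet pushed to the device. */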
+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
+               struct net_device *dev,
+               int (*sync)(struct net_device *, const unsigned char *),
+               int (*unsync)(struct net_device *, const unsigned char *))
+{
+       struct netdev_hw_addr *ha, *tmp;
+       int err;
+
+       /* first go through and flush out any stale entries */
+       list_for_each_entry_safe(ha, tmp, &list->list, list) {
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+               if (!ha->synced || ha->refcount != 1)
+#else
+               if (!ha->sync_cnt || ha->refcount != 1)
+#endif
+                       continue;
+
+               if (unsync && unsync(dev, ha->addr))
+                       continue;
+
+               list_del_rcu(&ha->list);
+               kfree_rcu(ha, rcu_head);
+               list->count--;
+       }
+
+       /* go through and sync new entries to the list */
+       list_for_each_entry_safe(ha, tmp, &list->list, list) {
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+               if (ha->synced)
+#else
+               if (ha->sync_cnt)
+#endif
+                       continue;
+
+               err = sync(dev, ha->addr);
+               if (err)
+                       return err;
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+               ha->synced = true;
+#else
+               ha->sync_cnt++;
+#endif
+               ha->refcount++;
+       }
+
+       return 0;
+}
+
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+               struct net_device *dev,
+               int (*unsync)(struct net_device *, const unsigned char *))
+{
+       struct netdev_hw_addr *ha, *tmp;
+
+       list_for_each_entry_safe(ha, tmp, &list->list, list) {
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+               if (!ha->synced)
+#else
+               if (!ha->sync_cnt)
+#endif
+                       continue;
+
+               if (unsync && unsync(dev, ha->addr))
+                       continue;
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+               ha->synced = false;
+#else
+               ha->sync_cnt--;
+#endif
+               if (--ha->refcount)
+                       continue;
+
+               list_del_rcu(&ha->list);
+               kfree_rcu(ha, rcu_head);
+               list->count--;
+       }
+}
+
+#endif /* NETDEV_HW_ADDR_T_UNICAST  */
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
+               struct net_device *dev,
+               int (*sync)(struct net_device *, const unsigned char *),
+               int (*unsync)(struct net_device *, const unsigned char *))
+{
+       struct dev_addr_list *da, **next = list;
+       int err;
+
+       /* first go through and flush out any stale entries */
+       while ((da = *next) != NULL) {
+               if (da->da_synced && da->da_users == 1) {
+                       if (!unsync || !unsync(dev, da->da_addr)) {
+                               *next = da->next;
+                               kfree(da);
+                               (*count)--;
+                               continue;
+                       }
+               }
+               next = &da->next;
+       }
+
+       /* go through and sync new entries to the list */
+       for (da = *list; da != NULL; da = da->next) {
+               if (da->da_synced)
+                       continue;
+
+               err = sync(dev, da->da_addr);
+               if (err)
+                       return err;
+
+               da->da_synced++;
+               da->da_users++;
+       }
+
+       return 0;
+}
+
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+               struct net_device *dev,
+               int (*unsync)(struct net_device *, const unsigned char *))
+{
+       struct dev_addr_list *da;
+
+       while ((da = *list) != NULL) {
+               if (da->da_synced) {
+                       if (!unsync || !unsync(dev, da->da_addr)) {
+                               da->da_synced--;
+                               if (--da->da_users == 0) {
+                                       *list = da->next;
+                                       kfree(da);
+                                       (*count)--;
+                                       continue;
+                               }
+                       }
+               }
+               list = &da->next;
+       }
+}
+#endif /* NETDEV_HW_ADDR_T_MULTICAST  */
+#endif /* HAVE_SET_RX_MODE */
+#endif /* < 3.16.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
+#ifndef NO_PTP_SUPPORT
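+/* Backports of skb_clone_sk() and skb_complete_tx_timestamp() for PTP Tx
+ * timestamping: the clone keeps a reference on the originating socket so
+ * the timestamp can be queued to its error queue after transmit. */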
+static void __kc_sock_efree(struct sk_buff *skb)
+{
+       sock_put(skb->sk);
+}
+
+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb)
+{
+       struct sock *sk = skb->sk;
+       struct sk_buff *clone;
+
+       if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
+               return NULL;
+
+       clone = skb_clone(skb, GFP_ATOMIC);
+       if (!clone) {
+               sock_put(sk);
+               return NULL;
+       }
+
+       clone->sk = sk;
+       clone->destructor = __kc_sock_efree;
+
+       return clone;
+}
+
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+                                   struct skb_shared_hwtstamps *hwtstamps)
+{
+       struct sock_exterr_skb *serr;
+       struct sock *sk = skb->sk;
+       int err;
+
+       sock_hold(sk);
+
+       *skb_hwtstamps(skb) = *hwtstamps;
+
+       serr = SKB_EXT_ERR(skb);
+       memset(serr, 0, sizeof(*serr));
+       serr->ee.ee_errno = ENOMSG;
+       serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
+       err = sock_queue_err_skb(sk, skb);
+       if (err)
+               kfree_skb(skb);
+
+       sock_put(sk);
+}
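+/* Usage sketch (illustrative, with a hypothetical example_adapter): the
+ * usual backported PTP Tx-timestamp flow clones the outgoing skb in the
+ * xmit path and completes it once the MAC has latched the transmit time.
+ *
+ *     // xmit path: take a reference for later timestamping
+ *     struct sk_buff *clone = __kc_skb_clone_sk(skb);
+ *     if (clone)
+ *             example_adapter->ptp_tx_skb = clone;
+ *
+ *     // interrupt/work path: deliver the hardware timestamp to the socket
+ *     struct skb_shared_hwtstamps shhwtstamps;
+ *
+ *     memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ *     shhwtstamps.hwtstamp = ns_to_ktime(example_read_tx_tstamp_ns());
+ *     __kc_skb_complete_tx_timestamp(example_adapter->ptp_tx_skb,
+ *                                    &shhwtstamps);
+ *     example_adapter->ptp_tx_skb = NULL;
+ */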
+#endif
+
+/* include headers needed for get_headlen function */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#include <scsi/fc/fc_fcoe.h>
+#endif
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+
+unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len)
+{
+       union {
+               unsigned char *network;
+               /* l2 headers */
+               struct ethhdr *eth;
+               struct vlan_hdr *vlan;
+               /* l3 headers */
+               struct iphdr *ipv4;
+               struct ipv6hdr *ipv6;
+       } hdr;
+       __be16 proto;
+       u8 nexthdr = 0; /* default to not TCP */
+       u8 hlen;
+
+       /* this should never happen, but better safe than sorry */
+       if (max_len < ETH_HLEN)
+               return max_len;
+
+       /* initialize network frame pointer */
+       hdr.network = data;
+
+       /* set first protocol and move network header forward */
+       proto = hdr.eth->h_proto;
+       hdr.network += ETH_HLEN;
+
+again:
+       switch (proto) {
+       /* handle any vlan tag if present */
+       case __constant_htons(ETH_P_8021AD):
+       case __constant_htons(ETH_P_8021Q):
+               if ((hdr.network - data) > (max_len - VLAN_HLEN))
+                       return max_len;
+
+               proto = hdr.vlan->h_vlan_encapsulated_proto;
+               hdr.network += VLAN_HLEN;
+               goto again;
+       /* handle L3 protocols */
+       case __constant_htons(ETH_P_IP):
+               if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+                       return max_len;
+
+               /* access ihl as a u8 to avoid unaligned access on ia64 */
+               hlen = (hdr.network[0] & 0x0F) << 2;
+
+               /* verify hlen meets minimum size requirements */
+               if (hlen < sizeof(struct iphdr))
+                       return hdr.network - data;
+
+               /* record next protocol if header is present */
+               if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+                       nexthdr = hdr.ipv4->protocol;
+
+               hdr.network += hlen;
+               break;
+#ifdef NETIF_F_TSO6
+       case __constant_htons(ETH_P_IPV6):
+               if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+                       return max_len;
+
+               /* record next protocol */
+               nexthdr = hdr.ipv6->nexthdr;
+               hdr.network += sizeof(struct ipv6hdr);
+               break;
+#endif /* NETIF_F_TSO6 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+       case __constant_htons(ETH_P_FCOE):
+               hdr.network += FCOE_HEADER_LEN;
+               break;
+#endif
+       default:
+               return hdr.network - data;
+       }
+
+       /* finally sort out L4 */
+       switch (nexthdr) {
+       case IPPROTO_TCP:
+               if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+                       return max_len;
+
+               /* access doff as a u8 to avoid unaligned access on ia64 */
+               hdr.network += max_t(u8, sizeof(struct tcphdr),
+                                    (hdr.network[12] & 0xF0) >> 2);
+
+               break;
+       case IPPROTO_UDP:
+       case IPPROTO_UDPLITE:
+               hdr.network += sizeof(struct udphdr);
+               break;
+#ifdef HAVE_SCTP
+       case IPPROTO_SCTP:
+               hdr.network += sizeof(struct sctphdr);
+               break;
+#endif
+       }
+
+       /*
+        * If everything has gone correctly hdr.network should be the
+        * data section of the packet and will be the end of the header.
+        * If not then it probably represents the end of the last recognized
+        * header.
+        */
+       return min_t(unsigned int, hdr.network - data, max_len);
+}
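+/* Usage sketch (illustrative): on receive, a driver can use this helper to
+ * decide how many bytes to copy into the skb linear area so that all
+ * recognized headers land there while the payload stays in the page
+ * fragment. The 256-byte cap below is an arbitrary example value.
+ *
+ *     unsigned char *va = page_address(page) + offset;
+ *     unsigned int hlen = __kc_eth_get_headlen(va, 256);
+ *
+ *     memcpy(__skb_put(skb, hlen), va, hlen);
+ */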
+
+#endif /* < 3.18.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
+#ifdef HAVE_NET_GET_RANDOM_ONCE
+static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN];
+
+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
+{
+       BUG_ON(len > sizeof(__kc_netdev_rss_key));
+       net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key));
+       memcpy(buffer, __kc_netdev_rss_key, len);
+}
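+/* Usage sketch (illustrative): net_get_random_once() fills the backing
+ * buffer only on first use, so every caller sees the same key for the
+ * lifetime of the module, mirroring netdev_rss_key_fill() semantics.
+ *
+ *     u8 rss_key[NETDEV_RSS_KEY_LEN];
+ *
+ *     __kc_netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ *     // program rss_key into the device's RSS hash-key registers
+ */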
+#endif
+#endif
diff --git a/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/kcompat.h b/i40e-dkms-1.3.47/i40e-1.3.47/src/i40e/kcompat.h
new file mode 100644 (file)
index 0000000..1d43af6
--- /dev/null
@@ -0,0 +1,4634 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
+#ifndef UTS_RELEASE
+/* utsrelease.h changed locations in 2.6.33 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#include <linux/utsrelease.h>
+#else
+#include <generated/utsrelease.h>
+#endif
+#endif
+
+/* NAPI enable/disable flags here */
+#define NAPI
+
+#define adapter_struct i40e_pf
+#define adapter_q_vector i40e_q_vector
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#ifndef CONFIG_I40E_NAPI
+#define CONFIG_I40E_NAPI
+#endif
+#else
+#undef CONFIG_I40E_NAPI
+#endif /* NAPI */
+
+/* Dynamic LTR and deeper C-State support disable/enable */
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#endif /* DISABLE_PACKET_SPLIT */
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+       u16 vector; /* kernel uses to write allocated vector */
+       u16 entry;  /* driver uses to specify entry, OS writes */
+};
+#endif
+#undef pci_enable_msi
+#define pci_enable_msi(a) -ENOTSUPP
+#undef pci_disable_msi
+#define pci_disable_msi(a) do {} while (0)
+#undef pci_enable_msix
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#undef pci_disable_msix
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#else
+#define _Bool char
+#endif
+
+#undef __always_unused
+#define __always_unused __attribute__((__unused__))
+
+#undef __maybe_unused
+#define __maybe_unused __attribute__((__unused__))
+
+/* kernels less than 2.4.14 don't have this */
+#ifndef ETH_P_8021Q
+#define ETH_P_8021Q 0x8100
+#endif
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK  0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK  0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef uninitialized_var
+#define uninitialized_var(x) x = x
+#endif
+
+#ifndef PCIE_LINK_STATE_L0S
+#define PCIE_LINK_STATE_L0S 1
+#endif
+#ifndef PCIE_LINK_STATE_L1
+#define PCIE_LINK_STATE_L1 2
+#endif
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#define free_netdev(x) kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* if we do not have the infrastructure to detect if skb_header is cloned
+   just return false in all cases */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef NETIF_F_GRO
+#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
+               vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
+#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
+#endif
+
+#ifndef NETIF_F_SCTP_CSUM
+#define NETIF_F_SCTP_CSUM 0
+#endif
+
+#ifndef NETIF_F_LRO
+#define NETIF_F_LRO (1 << 15)
+#endif
+
+#ifndef NETIF_F_NTUPLE
+#define NETIF_F_NTUPLE (1 << 27)
+#endif
+
+#ifndef NETIF_F_ALL_FCOE
+#define NETIF_F_ALL_FCOE       (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+                                NETIF_F_FSO)
+#endif
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
+
+#ifndef IPPROTO_UDPLITE
+#define IPPROTO_UDPLITE 136
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef MII_RESV1
+#define MII_RESV1              0x17            /* Reserved...          */
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+       .vendor = (vend), .device = (dev), \
+       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef node_online
+#define node_online(node) ((node) == 0)
+#endif
+
+#ifndef num_online_cpus
+#define num_online_cpus() smp_num_cpus
+#endif
+
+#ifndef cpu_online
+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if defined(__i386__) || defined(__x86_64__)
+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+#endif
+
+/* taken from 2.6.24 definition in linux/kernel.h */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(x,a)         (((x) % ((typeof(x))(a))) == 0)
+#endif
+
+#ifdef IS_ENABLED
+#undef IS_ENABLED
+#undef __ARG_PLACEHOLDER_1
+#undef config_enabled
+#undef _config_enabled
+#undef __config_enabled
+#undef ___config_enabled
+#endif
+
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) val
+
+#define IS_ENABLED(option) \
+       (config_enabled(option) || config_enabled(option##_MODULE))
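+
+/* Worked example (illustrative) of the token-paste trick above: a set
+ * bool/tristate option is defined to 1, so pasting produces
+ * __ARG_PLACEHOLDER_1, which expands to "0," and shifts a literal 1 into
+ * the val slot of ___config_enabled (CONFIG_FOO is hypothetical):
+ *
+ *     config_enabled(CONFIG_FOO)                 // CONFIG_FOO defined to 1
+ *       -> __config_enabled(__ARG_PLACEHOLDER_1)
+ *       -> ___config_enabled(0, 1, 0)            // val == 1
+ *
+ * For an unset option the placeholder macro does not exist, the pasted
+ * token lands in the __ignored slot, and val resolves to the trailing 0.
+ */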
+
+#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
+struct _kc_vlan_ethhdr {
+       unsigned char   h_dest[ETH_ALEN];
+       unsigned char   h_source[ETH_ALEN];
+       __be16          h_vlan_proto;
+       __be16          h_vlan_TCI;
+       __be16          h_vlan_encapsulated_proto;
+};
+#define vlan_ethhdr _kc_vlan_ethhdr
+struct _kc_vlan_hdr {
+       __be16          h_vlan_TCI;
+       __be16          h_vlan_encapsulated_proto;
+};
+#define vlan_hdr _kc_vlan_hdr
+#define vlan_tx_tag_present(_skb) 0
+#define vlan_tx_tag_get(_skb) 0
+#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
+
+#ifndef VLAN_PRIO_SHIFT
+#define VLAN_PRIO_SHIFT 13
+#endif
+
+#ifndef __GFP_COLD
+#define __GFP_COLD 0
+#endif
+
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
+#ifndef IP_OFFSET
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+#endif
+
+/*****************************************************************************/
+/* Installations with an ethtool version lacking eeprom, adapter id, or
+ * statistics support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+       u32 cmd;
+       char driver[32];
+       char version[32];
+       char fw_version[32];
+       char bus_info[32];
+       char reserved1[32];
+       char reserved2[16];
+       u32 n_stats;
+       u32 testinfo_len;
+       u32 eedump_len;
+       u32 regdump_len;
+};
+
+struct ethtool_stats {
+       u32 cmd;
+       u32 n_stats;
+       u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+       ETH_SS_TEST             = 0,
+       ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+       u32 cmd;            /* ETHTOOL_GSTRINGS */
+       u32 string_set;     /* string set id, e.g. ETH_SS_TEST, etc. */
+       u32 len;            /* number of strings in the string set */
+       u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+       ETH_TEST_FL_OFFLINE     = (1 << 0),
+       ETH_TEST_FL_FAILED      = (1 << 1),
+};
+struct ethtool_test {
+       u32 cmd;
+       u32 flags;
+       u32 reserved;
+       u32 len;
+       u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+       u32 cmd;
+       u32 magic;
+       u32 offset;
+       u32 len;
+       u8 data[0];
+};
+
+struct ethtool_value {
+       u32 cmd;
+       u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GWOL
+#define ETHTOOL_GWOL 0x5
+#define ETHTOOL_SWOL 0x6
+#define SOPASS_MAX      6
+struct ethtool_wolinfo {
+       u32 cmd;
+       u32 supported;
+       u32 wolopts;
+       u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+};
+#endif /* ETHTOOL_GWOL */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS          0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+       u32 cmd;
+       u32 version; /* driver-specific, indicates different chips/revs */
+       u32 len; /* bytes */
+       u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL                0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL                0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST       0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK          0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM                0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM                0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE      0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+       u32     cmd;    /* ETHTOOL_{G,S}COALESCE */
+
+       /* How many usecs to delay an RX interrupt after
+        * a packet arrives.  If 0, only rx_max_coalesced_frames
+        * is used.
+        */
+       u32     rx_coalesce_usecs;
+
+       /* How many packets to delay an RX interrupt after
+        * a packet arrives.  If 0, only rx_coalesce_usecs is
+        * used.  It is illegal to set both usecs and max frames
+        * to zero as this would cause RX interrupts to never be
+        * generated.
+        */
+       u32     rx_max_coalesced_frames;
+
+       /* Same as above two parameters, except that these values
+        * apply while an IRQ is being serviced by the host.  Not
+        * all cards support this feature and the values are ignored
+        * in that case.
+        */
+       u32     rx_coalesce_usecs_irq;
+       u32     rx_max_coalesced_frames_irq;
+
+       /* How many usecs to delay a TX interrupt after
+        * a packet is sent.  If 0, only tx_max_coalesced_frames
+        * is used.
+        */
+       u32     tx_coalesce_usecs;
+
+       /* How many packets to delay a TX interrupt after
+        * a packet is sent.  If 0, only tx_coalesce_usecs is
+        * used.  It is illegal to set both usecs and max frames
+        * to zero as this would cause TX interrupts to never be
+        * generated.
+        */
+       u32     tx_max_coalesced_frames;
+
+       /* Same as above two parameters, except that these values
+        * apply while an IRQ is being serviced by the host.  Not
+        * all cards support this feature and the values are ignored
+        * in that case.
+        */
+       u32     tx_coalesce_usecs_irq;
+       u32     tx_max_coalesced_frames_irq;
+
+       /* How many usecs to delay in-memory statistics
+        * block updates.  Some drivers do not have an in-memory
+        * statistic block, and in such cases this value is ignored.
+        * This value must not be zero.
+        */
+       u32     stats_block_coalesce_usecs;
+
+       /* Adaptive RX/TX coalescing is an algorithm implemented by
+        * some drivers to improve latency under low packet rates and
+        * improve throughput under high packet rates.  Some drivers
+        * only implement one of RX or TX adaptive coalescing.  Anything
+        * not implemented by the driver causes these values to be
+        * silently ignored.
+        */
+       u32     use_adaptive_rx_coalesce;
+       u32     use_adaptive_tx_coalesce;
+
+       /* When the packet rate (measured in packets per second)
+        * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+        * used.
+        */
+       u32     pkt_rate_low;
+       u32     rx_coalesce_usecs_low;
+       u32     rx_max_coalesced_frames_low;
+       u32     tx_coalesce_usecs_low;
+       u32     tx_max_coalesced_frames_low;
+
+       /* When the packet rate is below pkt_rate_high but above
+        * pkt_rate_low (both measured in packets per second) the
+        * normal {rx,tx}_* coalescing parameters are used.
+        */
+
+       /* When the packet rate (measured in packets per second)
+        * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+        * used.
+        */
+       u32     pkt_rate_high;
+       u32     rx_coalesce_usecs_high;
+       u32     rx_max_coalesced_frames_high;
+       u32     tx_coalesce_usecs_high;
+       u32     tx_max_coalesced_frames_high;
+
+       /* How often to do adaptive coalescing packet rate sampling,
+        * measured in seconds.  Must not be zero.
+        */
+       u32     rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE      0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM     0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+       u32     cmd;    /* ETHTOOL_{G,S}RINGPARAM */
+
+       /* Read only attributes.  These indicate the maximum number
+        * of pending RX/TX ring entries the driver will allow the
+        * user to set.
+        */
+       u32     rx_max_pending;
+       u32     rx_mini_max_pending;
+       u32     rx_jumbo_max_pending;
+       u32     tx_max_pending;
+
+       /* Values changeable by the user.  The valid values are
+        * in the range 1 to the "*_max_pending" counterpart above.
+        */
+       u32     rx_pending;
+       u32     rx_mini_pending;
+       u32     rx_jumbo_pending;
+       u32     tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM     0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM    0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+       u32     cmd;    /* ETHTOOL_{G,S}PAUSEPARAM */
+
+       /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+        * being true) the user may set 'autoneg' here non-zero to have the
+        * pause parameters be auto-negotiated too.  In such a case, the
+        * {rx,tx}_pause values below determine what capabilities are
+        * advertised.
+        *
+        * If 'autoneg' is zero or the link is not being auto-negotiated,
+        * then {rx,tx}_pause force the driver to use/not-use pause
+        * flow control.
+        */
+       u32     autoneg;
+       u32     rx_pause;
+       u32     tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM    0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM                0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM                0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM                0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM                0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG            0x00000018 /* Get scatter-gather enable
+                                           * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG            0x00000019 /* Set scatter-gather enable
+                                           * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST           0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS       0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID                0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS         0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO           0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO           0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN    32
+#endif
+
+#ifndef SPEED_2500
+#define SPEED_2500 2500
+#endif
+#ifndef SPEED_5000
+#define SPEED_5000 5000
+#endif
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+
+#ifndef AX_RELEASE_CODE
+#define AX_RELEASE_CODE 0
+#endif
+
+#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
+#endif
+
+#ifndef RHEL_RELEASE_CODE
+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
+#define RHEL_RELEASE_CODE 0
+#endif
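+
+/* Usage sketch (illustrative): later parts of this header gate backports
+ * on distribution kernels with ordered comparisons against these codes,
+ * e.g.:
+ *
+ *     #if (RHEL_RELEASE_CODE && \
+ *          RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))
+ *     // RHEL 6.4+ carries the backport in its 2.6.32 kernel
+ *     #endif
+ *
+ * The RHEL_RELEASE_CODE 0 fallback keeps such comparisons valid on
+ * non-RHEL kernels.
+ */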
+
+/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find
+ * it in /usr/src/linux-headers-$(uname -r)/include/generated/utsrelease.h
+ * for new enough versions of Ubuntu. Otherwise you can simply see it in the
+ * output of uname as the 4th digit of the kernel version. The
+ * UTS_UBUNTU_RELEASE_ABI macro is not in the linux-source package, but in
+ * the linux-headers package. It begins to appear in later releases of 14.04
+ * and 14.10.
+ *
+ * Ex:
+ * <Ubuntu 14.04.1>
+ *  $ uname -r
+ *  3.13.0-45-generic
+ * ABI is 45
+ *
+ * <Ubuntu 14.10>
+ *  $ uname -r
+ *  3.16.0-23-generic
+ * ABI is 23
+ */
+#ifndef UTS_UBUNTU_RELEASE_ABI
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#else
+/* Ubuntu does not provide an actual release version macro, so we use the
+ * kernel version plus the ABI to generate a unique version code specific to
+ * Ubuntu. In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in
+ * order to ignore differences in sublevel, which are not important since we
+ * have the ABI value. Otherwise, it becomes impossible to correlate ABI to
+ * version for ordering checks.
+ */
+#define UBUNTU_VERSION_CODE (((LINUX_VERSION_CODE & ~0xFF) << 8) + (UTS_UBUNTU_RELEASE_ABI))
+
+#if UTS_UBUNTU_RELEASE_ABI > 255
+#error UTS_UBUNTU_RELEASE_ABI is too large...
+#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */
+
+#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) )
+/* Our version code scheme does not make sense for kernels older than 3.x,
+ * and kcompat has no support for this scenario. Thus, treat this as a
+ * non-Ubuntu kernel. It might be better to error out here.
+ */
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#endif
+
+#endif
+
+/* Note that the 3rd digit is always zero, and will be ignored, because
+ * Ubuntu kernels are based on x.y.0-ABI values; while their Linux version
+ * codes have three digits, the 3rd digit is superseded by the ABI value.
+ */
+#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d))
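+
+/* Worked example (illustrative): on the 3.13.0-45-generic kernel above,
+ * UBUNTU_VERSION_CODE equals UBUNTU_VERSION(3,13,0,45), so an ordered
+ * check such as
+ *
+ *     #if UBUNTU_VERSION_CODE && \
+ *         UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30)
+ *
+ * compares first on the kernel version in the high bits and then on the
+ * ABI in the low 8 bits, which is why the ABI must stay below 256.
+ */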
+
+/* SuSE version macro is the same as Linux kernel version */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
+#endif
+#ifdef CONFIG_SUSE_KERNEL
+#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
+/* SLES11 GA is 2.6.27 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) )
+/* SLES11 SP2 is 3.0.13 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,2,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76)))
+/* SLES11 SP3 is 3.0.76 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)))
+/* SLES11 SP4 is 3.0.101 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,4,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)))
+/* SLES12 GA is 3.12.28 based */
+#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
+/* new SLES kernels must be added here with >= based on the kernel version;
+ * the idea is to order from newest to oldest and catch all
+ * of them using the >=
+ */
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,47)))
+/* SLES12 SP1 is 3.12.47-based */
+#define SLE_VERSION_CODE SLE_VERSION(12,1,0)
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
+#endif /* CONFIG_SUSE_KERNEL */
+#ifndef SLE_VERSION_CODE
+#define SLE_VERSION_CODE 0
+#endif /* SLE_VERSION_CODE */
+
+#ifdef __KLOCWORK__
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+#endif /* __KLOCWORK__ */
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+extern void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+extern int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data)              \
+       do {                                            \
+               INIT_LIST_HEAD(&(_tq)->list);           \
+               (_tq)->sync = 0;                        \
+               (_tq)->routine = _routine;              \
+               (_tq)->data = _data;                    \
+       } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR            0x00        /* Basic mode control register */
+#define MII_BMSR            0x01        /* Basic mode status register  */
+#define MII_PHYSID1         0x02        /* PHYS ID 1                   */
+#define MII_PHYSID2         0x03        /* PHYS ID 2                   */
+#define MII_ADVERTISE       0x04        /* Advertisement control reg   */
+#define MII_LPA             0x05        /* Link partner ability reg    */
+#define MII_EXPANSION       0x06        /* Expansion register          */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX           0x0100  /* Full duplex                 */
+#define BMCR_ANENABLE           0x1000  /* Enable auto negotiation     */
+/* Basic mode status register. */
+#define BMSR_ERCAP              0x0001  /* Ext-reg capability          */
+#define BMSR_ANEGCAPABLE        0x0008  /* Able to do auto-negotiation */
+#define BMSR_10HALF             0x0800  /* Can do 10mbps, half-duplex  */
+#define BMSR_10FULL             0x1000  /* Can do 10mbps, full-duplex  */
+#define BMSR_100HALF            0x2000  /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL            0x4000  /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA          0x0001  /* Only selector supported     */
+#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
+#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
+#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+                       ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE   0x0004  /* This enables npage words    */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+extern void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+                              PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+                              PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+                              PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+                              ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.9 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+       #define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
+#undef min
+#define min(x,y) ({ \
+       const typeof(x) _x = (x);       \
+       const typeof(y) _y = (y);       \
+       (void) (&_x == &_y);            \
+       _x < _y ? _x : _y; })
+
+#undef max
+#define max(x,y) ({ \
+       const typeof(x) _x = (x);       \
+       const typeof(y) _y = (y);       \
+       (void) (&_x == &_y);            \
+       _x > _y ? _x : _y; })
+
+#define min_t(type,x,y) ({ \
+       type _x = (x); \
+       type _y = (y); \
+       _x < _y ? _x : _y; })
+
+#define max_t(type,x,y) ({ \
+       type _x = (x); \
+       type _y = (y); \
+       _x > _y ? _x : _y; })
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head) \
+       for (pos = (head)->next, n = pos->next; pos != (head); \
+               pos = n, n = pos->next)
+#endif
+
+#ifndef ____cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#else
+#define ____cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
+#endif
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
+#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
+extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
+#else /* 2.4.8 => 2.4.9 */
+extern int snprintf(char * buf, size_t size, const char *fmt, ...);
+extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#endif
+#endif /* 2.4.10 => 2.4.9 */
+
+
+/*****************************************************************************/
+/* 2.4.12 => 2.4.10 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+       NETIF_MSG_DRV           = 0x0001,
+       NETIF_MSG_PROBE         = 0x0002,
+       NETIF_MSG_LINK          = 0x0004,
+       NETIF_MSG_TIMER         = 0x0008,
+       NETIF_MSG_IFDOWN        = 0x0010,
+       NETIF_MSG_IFUP          = 0x0020,
+       NETIF_MSG_RX_ERR        = 0x0040,
+       NETIF_MSG_TX_ERR        = 0x0080,
+       NETIF_MSG_TX_QUEUED     = 0x0100,
+       NETIF_MSG_INTR          = 0x0200,
+       NETIF_MSG_TX_DONE       = 0x0400,
+       NETIF_MSG_RX_STATUS     = 0x0800,
+       NETIF_MSG_PKTDATA       = 0x1000,
+       NETIF_MSG_HW            = 0x2000,
+       NETIF_MSG_WOL           = 0x4000,
+};
+
+#define netif_msg_drv(p)       ((p)->msg_enable & NETIF_MSG_DRV)
+#define netif_msg_probe(p)     ((p)->msg_enable & NETIF_MSG_PROBE)
+#define netif_msg_link(p)      ((p)->msg_enable & NETIF_MSG_LINK)
+#define netif_msg_timer(p)     ((p)->msg_enable & NETIF_MSG_TIMER)
+#define netif_msg_ifdown(p)    ((p)->msg_enable & NETIF_MSG_IFDOWN)
+#define netif_msg_ifup(p)      ((p)->msg_enable & NETIF_MSG_IFUP)
+#define netif_msg_rx_err(p)    ((p)->msg_enable & NETIF_MSG_RX_ERR)
+#define netif_msg_tx_err(p)    ((p)->msg_enable & NETIF_MSG_TX_ERR)
+#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+#define netif_msg_intr(p)      ((p)->msg_enable & NETIF_MSG_INTR)
+#define netif_msg_tx_done(p)   ((p)->msg_enable & NETIF_MSG_TX_DONE)
+#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
+#define netif_msg_pktdata(p)   ((p)->msg_enable & NETIF_MSG_PKTDATA)
+#endif /* !HAVE_NETIF_MSG */
+#endif /* 2.4.12 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+       #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax()    rep_nop()
+#endif
+
+struct vlan_ethhdr {
+       unsigned char h_dest[ETH_ALEN];
+       unsigned char h_source[ETH_ALEN];
+       unsigned short h_vlan_proto;
+       unsigned short h_vlan_TCI;
+       unsigned short h_vlan_encapsulated_proto;
+};
+#endif /* 2.4.13 => 2.4.12 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.13 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
+
+#ifndef __devexit_p
+       #define __devexit_p(x) &(x)
+#endif
+
+#endif /* 2.4.17 => 2.4.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
+#define NETIF_MSG_HW   0x2000
+#define NETIF_MSG_WOL  0x4000
+
+#ifndef netif_msg_hw
+#define netif_msg_hw(p)                ((p)->msg_enable & NETIF_MSG_HW)
+#endif
+#ifndef netif_msg_wol
+#define netif_msg_wol(p)       ((p)->msg_enable & NETIF_MSG_WOL)
+#endif
+#endif /* 2.4.18 */
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef NAPI
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#define pci_name(x)    ((x)->slot_name)
+
+#ifndef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full      (1 << 12)
+#endif
+#ifndef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full     (1 << 12)
+#endif
+#endif
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+       while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+               /* No hurry */
+               current->state = TASK_INTERRUPTIBLE;
+               schedule_timeout(1);
+       }
+}
+#endif
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+       clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+       spin_lock_bh(&dev->xmit_lock);
+       netif_stop_queue(dev);
+       spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#else /* 2.4.23 => 2.4.22 */
+#define HAVE_SCTP
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
+#define ETHTOOL_OPS_COMPAT
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) )
+#define __user
+#endif /* < 2.4.27 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
+#define sk_protocol protocol
+#define pci_get_device pci_find_device
+#endif /* 2.5.71 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
+
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+       /* use default */
+       if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+               return default_msg_enable_bits;
+       if (debug_value == 0) /* no output */
+               return 0;
+       /* set low N bits */
+       return (1 << debug_value) - 1;
+}
+#endif
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+     (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#include <linux/rtnetlink.h>
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+/*
+ * Most of the dma compat code is copied/modified from the 2.4.37
+ * /include/linux/libata-compat.h header file
+ */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+       DMA_BIDIRECTIONAL = 0,
+       DMA_TO_DEVICE = 1,
+       DMA_FROM_DEVICE = 2,
+       DMA_NONE = 3,
+};
+
+struct device {
+       struct pci_dev pdev;
+};
+
+static inline struct pci_dev *to_pci_dev (struct device *dev)
+{
+       return (struct pci_dev *) dev;
+}
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+       return (struct device *) pdev;
+}
+
+#define pdev_printk(lvl, pdev, fmt, args...)   \
+       printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
+#define dev_err(dev, fmt, args...)            \
+       pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
+#define dev_info(dev, fmt, args...)            \
+       pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
+#define dev_warn(dev, fmt, args...)            \
+       pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...)            \
+       pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
+#define dev_dbg(dev, fmt, args...) \
+       pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
+
+/* NOTE: dangerous! we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+       pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+       pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+       pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+       pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+       pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+       pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_sg(dev, sg, nents, dir) \
+       pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+       pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+       pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+       pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+       pci_set_dma_mask(to_pci_dev(dev),(mask))
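+
+/* Usage sketch (illustrative): with the shim above, generic DMA API calls
+ * written against struct device degrade to the 2.4 PCI DMA API, e.g.
+ *
+ *     dma_addr_t ring_dma;
+ *     void *ring = dma_alloc_coherent(pci_dev_to_dev(pdev), size,
+ *                                     &ring_dma, GFP_KERNEL);
+ *
+ * expands to pci_alloc_consistent(pdev, size, &ring_dma); note that, as
+ * the comment above warns, the GFP_KERNEL argument is silently dropped.
+ */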
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+       struct hlist_node *first;
+};
+
+struct hlist_node {
+       struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+       struct hlist_node *next = n->next;
+       struct hlist_node **pprev = n->pprev;
+       *pprev = next;
+       if (next)
+               next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+       __hlist_del(n);
+       n->next = NULL;
+       n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+       struct hlist_node *first = h->first;
+       n->next = first;
+       if (first)
+               first->pprev = &n->next;
+       h->first = n;
+       n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+       return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+       h->next = NULL;
+       h->pprev = NULL;
+}
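+
+/* Usage sketch (illustrative): this minimal backport is used exactly like
+ * the modern hlist API, e.g.
+ *
+ *     HLIST_HEAD(example_bucket);
+ *     struct hlist_node n;
+ *
+ *     INIT_HLIST_NODE(&n);
+ *     hlist_add_head(&n, &example_bucket);
+ *     hlist_del(&n);
+ */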
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+       return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.23 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
+#ifndef rcu_head
+struct __kc_callback_head {
+       struct __kc_callback_head *next;
+       void (*func)(struct callback_head *head);
+};
+#define rcu_head __kc_callback_head
+#endif
+
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
+#endif
+
+#define dma_set_coherent_mask(dev,mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
+#endif
+
+#undef ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+#ifndef page_count
+#define page_count(p) atomic_read(&(p)->count)
+#endif
+
+#ifdef MAX_NUMNODES
+#undef MAX_NUMNODES
+#endif
+#define MAX_NUMNODES 1
+
+/* find_first_bit and find_next_bit are not defined for most
+ * 2.4 kernels (except for the redhat 2.4.21 kernels)
+ */
+#include <linux/bitops.h>
+#define BITOP_WORD(nr)          ((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+extern unsigned long _kc_find_next_bit(const unsigned long *addr,
+                                       unsigned long size,
+                                       unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+       if (strchr(dev->name, '%'))
+               return "(unregistered net_device)";
+       return dev->name;
+}
+#define netdev_name(netdev)    _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#ifndef strlcpy
+#define strlcpy _kc_strlcpy
+extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
+#endif /* strlcpy */
+
+#ifndef do_div
+#if BITS_PER_LONG == 64
+# define do_div(n,base) ({                                     \
+       uint32_t __base = (base);                               \
+       uint32_t __rem;                                         \
+       __rem = ((uint64_t)(n)) % __base;                       \
+       (n) = ((uint64_t)(n)) / __base;                         \
+       __rem;                                                  \
+ })
+#elif BITS_PER_LONG == 32
+extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
+# define do_div(n,base) ({                             \
+       uint32_t __base = (base);                       \
+       uint32_t __rem;                                 \
+       if (likely(((n) >> 32) == 0)) {                 \
+               __rem = (uint32_t)(n) % __base;         \
+               (n) = (uint32_t)(n) / __base;           \
+       } else                                          \
+               __rem = _kc__div64_32(&(n), __base);    \
+       __rem;                                          \
+ })
+#else /* BITS_PER_LONG == ?? */
+# error do_div() does not yet support the C64
+#endif /* BITS_PER_LONG */
+#endif /* do_div */
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC   1000000000L
+#endif
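+
+/* Usage sketch (illustrative): do_div() divides its first argument in
+ * place and returns the 32-bit remainder, so a 64-bit nanosecond count
+ * splits as
+ *
+ *     u64 ns = 3123456789ULL;
+ *     u32 rem = do_div(ns, NSEC_PER_SEC);
+ *     // now ns == 3 (seconds) and rem == 123456789 (nanoseconds)
+ */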
+
+#undef HAVE_I2C_SUPPORT
+#else /* 2.6.0 */
+
+#endif /* 2.6.0 => 2.5.28 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+       pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define dma_sync_single_for_cpu                dma_sync_single
+#define dma_sync_single_for_device     dma_sync_single
+#define dma_sync_single_range_for_cpu          dma_sync_single_range
+#define dma_sync_single_range_for_device       dma_sync_single_range
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+       return dma_addr == 0;
+}
+#endif
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+        if (nbits <= BITS_PER_LONG)
+                *dst = 0UL;
+        else {
+                int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+                memset(dst, 0, len);
+        }
+}
+#define page_to_nid(x) 0
+
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+       return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+
+#ifndef __force
+#define __force
+#endif
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define PCI_EXP_FLAGS          2       /* Capabilities register */
+#define PCI_EXP_FLAGS_VERS     0x000f  /* Capability version */
+#define PCI_EXP_FLAGS_TYPE     0x00f0  /* Device/Port type */
+#define  PCI_EXP_TYPE_ENDPOINT 0x0     /* Express Endpoint */
+#define  PCI_EXP_TYPE_LEG_END  0x1     /* Legacy Endpoint */
+#define  PCI_EXP_TYPE_ROOT_PORT 0x4    /* Root Port */
+#define  PCI_EXP_TYPE_DOWNSTREAM 0x6   /* Downstream Port */
+#define PCI_EXP_FLAGS_SLOT     0x0100  /* Slot implemented */
+#define PCI_EXP_DEVCAP         4       /* Device capabilities */
+#define PCI_EXP_DEVSTA         10      /* Device Status */
+#define msleep(x)      do { set_current_state(TASK_UNINTERRUPTIBLE); \
+                               schedule_timeout((x * HZ)/1000 + 2); \
+                       } while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include <net/dsfield.h>
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC    1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+       return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+       if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       return m * (HZ / MSEC_PER_SEC);
+#else
+       return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+       unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+       while (timeout && !signal_pending(current)) {
+               __set_current_state(TASK_INTERRUPTIBLE);
+               timeout = schedule_timeout(timeout);
+       }
+       return _kc_jiffies_to_msecs(timeout);
+}
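+/*
+ * Usage sketch for a hypothetical caller; as with the 2.6.9+ original, a
+ * non-zero return means a signal ended the sleep early and reports roughly
+ * how many milliseconds were left:
+ *
+ *     unsigned long left = msleep_interruptible(100);
+ *     if (left)
+ *             return -ERESTARTSYS;
+ */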
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000         0x0040  /* MSB of Speed (1000)         */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#endif
+#ifndef __be32
+#define __be32 u32
+#endif
+#ifndef __be64
+#define __be64 u64
+#endif
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+       return (struct vlan_ethhdr *)skb->mac.raw;
+}
+
+/* Wake-On-Lan options. */
+#define WAKE_PHY               (1 << 0)
+#define WAKE_UCAST             (1 << 1)
+#define WAKE_MCAST             (1 << 2)
+#define WAKE_BCAST             (1 << 3)
+#define WAKE_ARP               (1 << 4)
+#define WAKE_MAGIC             (1 << 5)
+#define WAKE_MAGICSECURE       (1 << 6) /* only meaningful if WAKE_MAGIC */
+
+#define skb_header_pointer _kc_skb_header_pointer
+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
+                                           int offset, int len, void *buffer)
+{
+       int hlen = skb_headlen(skb);
+
+       if (hlen - offset >= len)
+               return skb->data + offset;
+
+#ifdef MAX_SKB_FRAGS
+       if (skb_copy_bits(skb, offset, buffer, len) < 0)
+               return NULL;
+
+       return buffer;
+#else
+       return NULL;
+#endif
+}
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
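+/*
+ * The skb_header_pointer() shim above follows the mainline contract: it
+ * returns a pointer into the linear data when possible and otherwise copies
+ * into the caller's buffer, e.g. for a TCP header that may straddle frags:
+ *
+ *     struct tcphdr _th;
+ *     const struct tcphdr *th;
+ *
+ *     th = skb_header_pointer(skb, offset, sizeof(_th), &_th);
+ *     if (!th)
+ *             return;   (packet shorter than offset + sizeof(_th))
+ */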
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm)          \
+       static struct kparam_array __param_arr_##name                    \
+       = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+           sizeof(array[0]), array };                                   \
+       module_param_call(name, param_array_set, param_array_get,        \
+                         &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+/*
+ * num_online_nodes() is broken for all < 2.6.10 kernels.  This is needed to
+ * support the Node module parameter of ixgbe.
+ */
+#undef num_online_nodes
+#define num_online_nodes(n) 1
+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
+#undef node_online_map
+#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0      0
+#define PCI_D1      1
+#define PCI_D2      2
+#define PCI_D3hot   3
+#define PCI_D3cold  4
+typedef int pci_power_t;
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL 16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC        1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+       return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+       return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
+#else
+       return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+       if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+       return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+       return m * (HZ / KC_USEC_PER_SEC);
+#else
+       return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+
+#define PCI_EXP_LNKCAP         12      /* Link Capabilities */
+#define PCI_EXP_LNKSTA         18      /* Link Status */
+#define PCI_EXP_SLTCAP         20      /* Slot Capabilities */
+#define PCI_EXP_SLTCTL         24      /* Slot Control */
+#define PCI_EXP_SLTSTA         26      /* Slot Status */
+#define PCI_EXP_RTCTL          28      /* Root Control */
+#define PCI_EXP_RTCAP          30      /* Root Capabilities */
+#define PCI_EXP_RTSTA          32      /* Root Status */
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000        0x09        /* 1000BASE-T control          */
+#define MII_STAT1000        0x0a        /* 1000BASE-T status           */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP     0x0400  /* Try for pause               */
+#define ADVERTISE_PAUSE_ASYM    0x0800  /* Try for asymmetric pause     */
+/* Link partner ability register. */
+#define LPA_PAUSE_CAP          0x0400  /* Can pause                   */
+#define LPA_PAUSE_ASYM         0x0800  /* Can pause asymmetrically    */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL      0x0200  /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF     0x0100  /* Advertise 1000BASE-T half duplex */
+/* 1000BASE-T Status register */
+#define LPA_1000LOCALRXOK      0x2000  /* Link partner local receiver status */
+#define LPA_1000REMRXOK                0x1000  /* Link partner remote receiver status */
+
+#ifndef is_zero_ether_addr
+#define is_zero_ether_addr _kc_is_zero_ether_addr
+static inline int _kc_is_zero_ether_addr(const u8 *addr)
+{
+       return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+#endif /* is_zero_ether_addr */
+#ifndef is_multicast_ether_addr
+#define is_multicast_ether_addr _kc_is_multicast_ether_addr
+static inline int _kc_is_multicast_ether_addr(const u8 *addr)
+{
+       return addr[0] & 0x01;
+}
+#endif /* is_multicast_ether_addr */
+#endif /* < 2.6.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+#ifndef kstrdup
+#define kstrdup _kc_kstrdup
+extern char *_kc_kstrdup(const char *s, unsigned int gfp);
+#endif
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS        0x0f        /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN           0x0100  /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL     0x2000  /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF     0x1000  /* Can do 1000BT Half */
+
+#define SUPPORTED_Pause                (1 << 13)
+#define SUPPORTED_Asym_Pause   (1 << 14)
+#define ADVERTISED_Pause       (1 << 13)
+#define ADVERTISED_Asym_Pause  (1 << 14)
+
+#if (!(RHEL_RELEASE_CODE && \
+       (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
+       (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
+#define gfp_t unsigned
+#else
+typedef unsigned gfp_t;
+#endif
+#endif /* !RHEL4.3->RHEL5.0 */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
+#ifdef CONFIG_X86_64
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir)       \
+       dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir)    \
+       dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
+#endif
+#endif
+#endif /* < 2.6.14 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
+#ifndef kfree_rcu
+/* this is placed here due to a lack of rcu_barrier in previous kernels */
+#define kfree_rcu(_ptr, _offset) kfree(_ptr)
+#endif /* kfree_rcu */
+#ifndef vmalloc_node
+#define vmalloc_node(a,b) vmalloc(a)
+#endif /* vmalloc_node*/
+
+#define setup_timer(_timer, _function, _data) \
+do { \
+       (_timer)->function = _function; \
+       (_timer)->data = _data; \
+       init_timer(_timer); \
+} while (0)
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev) (1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val)     do{}while(0)
+#endif
+#ifndef device_init_wakeup
+#define device_init_wakeup(dev,val) do {} while (0)
+#endif
+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
+       const u16 *a = (const u16 *) addr1;
+       const u16 *b = (const u16 *) addr2;
+
+       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+#undef compare_ether_addr
+#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
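+/*
+ * Like the 2.6.15+ helper, this compares the address as three u16 words, so
+ * both pointers are assumed to be 16-bit aligned, and 0 means "equal";
+ * e.g., with eth pointing at a received frame's Ethernet header:
+ *
+ *     if (!compare_ether_addr(eth->h_source, netdev->dev_addr))
+ *             return;   (frame echoed back from our own MAC)
+ */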
+#endif /* < 2.6.15 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef DEFINE_MUTEX
+#define DEFINE_MUTEX(x)        DECLARE_MUTEX(x)
+#define mutex_lock(x)  down_interruptible(x)
+#define mutex_unlock(x)        up(x)
+
+#ifndef ____cacheline_internodealigned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
+#else
+#define ____cacheline_internodealigned_in_smp
+#endif /* CONFIG_SMP */
+#endif /* ____cacheline_internodealigned_in_smp */
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) )
+#ifdef device_can_wakeup
+#undef device_can_wakeup
+#endif /* device_can_wakeup */
+#define device_can_wakeup(dev) 1
+#endif /* SLE_VERSION(10,4,0) */
+#endif /* < 2.6.16 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...)            \
+       dev_printk(KERN_NOTICE, dev, fmt, ## args)
+#endif
+
+#ifndef first_online_node
+#define first_online_node 0
+#endif
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+#endif /* < 2.6.17 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef FIELD_SIZEOF
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+       return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#ifndef resource_size_t
+#define resource_size_t unsigned long
+#endif
+
+#ifdef skb_pad
+#undef skb_pad
+#endif
+#define skb_pad(x,y) _kc_skb_pad(x, y)
+int _kc_skb_pad(struct sk_buff *skb, int pad);
+#ifdef skb_padto
+#undef skb_padto
+#endif
+#define skb_padto(x,y) _kc_skb_padto(x, y)
+static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
+{
+       unsigned int size = skb->len;
+       if(likely(size >= len))
+               return 0;
+       return _kc_skb_pad(skb, len - size);
+}
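+/*
+ * e.g. padding runt frames before transmit; as in mainline, 0 means success
+ * and on failure the skb is assumed to have been consumed by _kc_skb_pad():
+ *
+ *     if (skb_padto(skb, ETH_ZLEN))
+ *             return NETDEV_TX_OK;
+ */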
+
+#ifndef DECLARE_PCI_UNMAP_ADDR
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+       dma_addr_t ADDR_NAME
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+       u32 LEN_NAME
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+       ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+       (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+       ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+       (((PTR)->LEN_NAME) = (VAL))
+#endif /* DECLARE_PCI_UNMAP_ADDR */
+#endif /* < 2.6.18 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+
+/* other values will be created as #defines later */
+enum pci_bus_speed {
+       PCI_SPEED_UNKNOWN = 0xff,
+};
+
+enum pcie_link_width {
+       PCIE_LNK_WIDTH_RESRV    = 0x00,
+       PCIE_LNK_X1             = 0x01,
+       PCIE_LNK_X2             = 0x02,
+       PCIE_LNK_X4             = 0x04,
+       PCIE_LNK_X8             = 0x08,
+       PCIE_LNK_X12            = 0x0C,
+       PCIE_LNK_X16            = 0x10,
+       PCIE_LNK_X32            = 0x20,
+       PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
+};
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
+#define i_private u.generic_ip
+#endif /* !(RHEL >= 5.0) */
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#if (!((RHEL_RELEASE_CODE && \
+        ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
+          RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
+         (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif /* >= 2.5.x */
+{
+       irq_handler_t new_handler = (irq_handler_t) handler;
+       return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
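+/*
+ * The define above routes request_irq() through the wrapper so drivers can
+ * pass 2.6.19-style two-argument handlers to the older kernel API; the cast
+ * relies on the kernel calling convention simply ignoring the unused
+ * pt_regs argument. For a hypothetical handler:
+ *
+ *     static irqreturn_t foo_intr(int irq, void *data);
+ *
+ *     err = request_irq(pdev->irq, foo_intr, IRQF_SHARED, "foo", adapter);
+ */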
+
+#define irq_handler_t new_handler_t
+/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while(0)
+#undef pci_save_state
+extern int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+extern void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+extern void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev)
+{
+       return 0;
+}
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+
+extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#include <linux/string.h>
+#include <linux/pci_hotplug.h>
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+       INIT_LIST_HEAD(&(_work)->entry); \
+       (_work)->pending = 0; \
+       (_work)->func = (void (*)(void *))_func; \
+       (_work)->data = _work; \
+       init_timer(&(_work)->timer); \
+} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev)        \
+       PCI_VENDOR_ID_##ven, (dev),  \
+       PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#define HAVE_EARLY_VMALLOC_NODE
+#define dev_to_node(dev) -1
+#undef set_dev_node
+/* self-assign b to silence the unused-variable compiler warning */
+#define set_dev_node(a, b) do { (b) = (b); } while(0)
+
+#if (!(RHEL_RELEASE_CODE && \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+        (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#endif
+
+#if (!(RHEL_RELEASE_CODE && \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+        (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+static inline __wsum csum_unfold(__sum16 n)
+{
+       return (__force __wsum)n;
+}
+#endif
+
+#else /* < 2.6.20 */
+#define HAVE_DEVICE_NUMA_NODE
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define NETDEV_CLASS_DEV
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev)             \
+       do {                                            \
+               if (vg) vg->vlan_devices[id] = dev;     \
+       } while (0)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define pci_channel_offline(pdev) (pdev->error_state && \
+       pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name) \
+        pci_request_regions(pdev, name)
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev)
+
+#ifndef __aligned
+#define __aligned(x)                   __attribute__((aligned(x)))
+#endif
+
+extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
+#define netdev_to_dev(netdev)  \
+       pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
+#else
+static inline struct device *netdev_to_dev(struct net_device *netdev)
+{
+       return &netdev->dev;
+}
+
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_reset_tail_pointer(skb) \
+       do { \
+               skb->tail = skb->data; \
+       } while (0)
+#define skb_set_tail_pointer(skb, offset) \
+       do { \
+               skb->tail = skb->data + offset; \
+       } while (0)
+#define skb_copy_to_linear_data(skb, from, len) \
+                               memcpy(skb->data, from, len)
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+                               memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#ifndef udp_hdr
+#define udp_hdr _udp_hdr
+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
+{
+       return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
+#ifdef cpu_to_be16
+#undef cpu_to_be16
+#endif
+#define cpu_to_be16(x) __constant_htons(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
+enum {
+       DUMP_PREFIX_NONE,
+       DUMP_PREFIX_ADDRESS,
+       DUMP_PREFIX_OFFSET
+};
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
+#ifndef hex_asc
+#define hex_asc(x)     "0123456789abcdef"[x]
+#endif
+#include <linux/ctype.h>
+extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
+                              int prefix_type, int rowsize, int groupsize,
+                              const void *buf, size_t len, bool ascii);
+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
+               _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#ifndef ADVERTISED_2500baseX_Full
+#define ADVERTISED_2500baseX_Full (1 << 15)
+#endif
+#ifndef SUPPORTED_2500baseX_Full
+#define SUPPORTED_2500baseX_Full (1 << 15)
+#endif
+
+#ifndef ETH_P_PAUSE
+#define ETH_P_PAUSE 0x8808
+#endif
+
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#define netif_subqueue_stopped(_a, _b) 0
+#ifndef PTR_ALIGN
+#define PTR_ALIGN(p, a)         ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#endif
+
+#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP        CONFIG_PM
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
+#define HAVE_ETHTOOL_GET_PERM_ADDR
+#endif /* 2.6.14 through 2.6.22 */
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifndef ETH_FLAG_LRO
+#define ETH_FLAG_LRO NETIF_F_LRO
+#endif
+
+#ifndef ACCESS_ONCE
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#endif
+
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+       /* used to look up the real NAPI polling routine */
+       int (*poll)(struct napi_struct *, int);
+       struct net_device *dev;
+       int weight;
+};
+#endif
+
+#ifdef NAPI
+extern int __kc_adapter_clean(struct net_device *, int *);
+/* The following definitions are multi-queue aware, and thus we have a driver
+ * define list which determines which drivers support multiple queues, and
+ * thus need these stronger defines. If a driver does not support multi-queue
+ * functionality, you don't need to add it to this list.
+ */
+extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
+
+static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+                                         int (*poll)(struct napi_struct *, int), int weight)
+{
+       struct net_device *poll_dev = napi_to_poll_dev(napi);
+       poll_dev->poll = __kc_adapter_clean;
+       poll_dev->priv = napi;
+       poll_dev->weight = weight;
+       set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state);
+       set_bit(__LINK_STATE_START, &poll_dev->state);
+       dev_hold(poll_dev);
+       napi->poll = poll;
+       napi->weight = weight;
+       napi->dev = dev;
+}
+#define netif_napi_add __kc_mq_netif_napi_add
+
+static inline void __kc_mq_netif_napi_del(struct napi_struct *napi)
+{
+       struct net_device *poll_dev = napi_to_poll_dev(napi);
+       WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state));
+       dev_put(poll_dev);
+       memset(poll_dev, 0, sizeof(struct net_device));
+}
+
+#define netif_napi_del __kc_mq_netif_napi_del
+
+static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi)
+{
+       return netif_running(napi->dev) &&
+               netif_rx_schedule_prep(napi_to_poll_dev(napi));
+}
+#define napi_schedule_prep __kc_mq_napi_schedule_prep
+
+static inline void __kc_mq_napi_schedule(struct napi_struct *napi)
+{
+       if (napi_schedule_prep(napi))
+               __netif_rx_schedule(napi_to_poll_dev(napi));
+}
+#define napi_schedule __kc_mq_napi_schedule
+
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+       struct net_device *dev = napi_to_poll_dev(n);
+
+       while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+               /* No hurry. */
+               msleep(1);
+       }
+}
+#else
+#define napi_synchronize(n)    barrier()
+#endif /* CONFIG_SMP */
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+static inline void _kc_napi_complete(struct napi_struct *napi)
+{
+#ifdef NETIF_F_GRO
+       napi_gro_flush(napi);
+#endif
+       netif_rx_complete(napi_to_poll_dev(napi));
+}
+#define napi_complete _kc_napi_complete
+#else /* NAPI */
+
+/* The following definitions are only used if we don't support NAPI at all. */
+
+static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+                                      int (*poll)(struct napi_struct *, int), int weight)
+{
+       dev->poll = poll;
+       dev->weight = weight;
+       napi->poll = poll;
+       napi->weight = weight;
+       napi->dev = dev;
+}
+#define netif_napi_add __kc_netif_napi_add
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n)        (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
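+/*
+ * e.g. DMA_BIT_MASK(32) is (1ULL << 32) - 1 = 0xffffffff; 64 has to be
+ * special-cased to DMA_64BIT_MASK because shifting a 64-bit value by 64
+ * bits is undefined in C.
+ */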
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+       return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT      ""
+#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+       printk(KERN_ERR fmt, ##arg)
+#endif
+
+#ifndef rounddown_pow_of_two
+#define rounddown_pow_of_two(n) \
+       (__builtin_constant_p(n) ? ( \
+               (n == 1) ? 1UL : \
+               (1UL << ilog2(n))) : \
+               (1UL << (fls_long(n) - 1)))
+#endif
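+/*
+ * e.g. rounddown_pow_of_two(5): fls_long(5) is 3, so the run-time arm
+ * yields 1UL << (3 - 1) = 4; a constant 5 goes through ilog2(5) = 2 with
+ * the same result, and n == 1 maps to 1.
+ */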
+
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#define INCLUDE_PM_QOS_PARAMS_H
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE   INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+               set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+               remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+               modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE   -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+/* note: relies on the caller having a local "adapter" in scope */
+#define pm_qos_update_requirement(pm_qos_class, name, value) do { \
+       if (value != PM_QOS_DEFAULT_VALUE) { \
+               printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+                       pci_name(adapter->pdev)); \
+       } \
+} while (0)
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+#ifndef strict_strtol
+#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r)
+static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res)
+{
+       /* adapted from strict_strtoul() in 2.6.25 */
+       char *tail;
+       long val;
+       size_t len;
+
+       *res = 0;
+       len = strlen(buf);
+       if (!len)
+               return -EINVAL;
+       val = simple_strtol(buf, &tail, base);
+       if (tail == buf)
+               return -EINVAL;
+       if ((*tail == '\0') ||
+           ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) {
+               *res = val;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+#endif
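+/*
+ * e.g. parsing a module or sysfs parameter; unlike bare simple_strtol(),
+ * the shim rejects trailing garbage but tolerates one trailing newline:
+ *
+ *     long val;
+ *
+ *     err = strict_strtol("42\n", 10, &val);    (err = 0, val = 42)
+ *     err = strict_strtol("42abc", 10, &val);   (err = -EINVAL)
+ */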
+
+#else /* < 2.6.25 */
+
+#endif /* < 2.6.25 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifndef clamp_t
+#define clamp_t(type, val, min, max) ({                \
+       type __val = (val);                     \
+       type __min = (min);                     \
+       type __max = (max);                     \
+       __val = __val < __min ? __min : __val;  \
+       __val > __max ? __max : __val; })
+#endif /* clamp_t */
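+/*
+ * e.g. clamp_t(u16, itr, 8, 8160) evaluates its arguments once each and
+ * returns 8 when itr < 8, 8160 when itr > 8160, and itr unchanged
+ * otherwise, with the comparisons done in u16.
+ */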
+#undef kzalloc_node
+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
+
+extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
+#else /* < 2.6.26 */
+#define NETDEV_CAN_SET_GSO_MAX_SIZE
+#include <linux/pci-aspm.h>
+#define HAVE_NETDEV_VLAN_FEATURES
+#ifndef PCI_EXP_LNKCAP_ASPMS
+#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#endif /* PCI_EXP_LNKCAP_ASPMS */
+#endif /* < 2.6.26 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+                                            __u32 speed)
+{
+       ep->speed = (__u16)speed;
+       /* ep->speed_hi = (__u16)(speed >> 16); */
+}
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+
+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
+{
+       /* no speed_hi before 2.6.27, and probably no need for it yet */
+       return (__u32)ep->speed;
+}
+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
+#define ANCIENT_PM 1
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
+       defined(CONFIG_PM_SLEEP))
+#define NEWER_PM 1
+#endif
+#if defined(ANCIENT_PM) || defined(NEWER_PM)
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) \
+       do { \
+               u16 pmc = 0; \
+               int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+               if (pm) { \
+                       pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+                               &pmc); \
+               } \
+               (dev)->power.can_wakeup = !!(pmc >> 11); \
+               (dev)->power.should_wakeup = (val && (pmc >> 11)); \
+       } while (0)
+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
+#endif /* 2.6.15 through 2.6.27 */
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list)
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifdef dma_mapping_error
+#undef dma_mapping_error
+#endif
+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#define HAVE_TX_MQ
+#endif
+
+#ifdef HAVE_TX_MQ
+extern void _kc_netif_tx_stop_all_queues(struct net_device *);
+extern void _kc_netif_tx_wake_all_queues(struct net_device *);
+extern void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+       if (netif_is_multiqueue((_ndev))) \
+               netif_stop_subqueue((_ndev), (_qi)); \
+       else \
+               netif_stop_queue((_ndev)); \
+       } while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+       if (netif_is_multiqueue((_ndev))) \
+               netif_start_subqueue((_ndev), (_qi)); \
+       else \
+               netif_start_queue((_ndev)); \
+       } while (0)
+#else /* HAVE_TX_MQ */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#else
+#define netif_tx_start_all_queues(a) do {} while (0)
+#endif
+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
+#endif /* HAVE_TX_MQ */
+#ifndef NETIF_F_MULTI_QUEUE
+#define NETIF_F_MULTI_QUEUE 0
+#define netif_is_multiqueue(a) 0
+#define netif_wake_subqueue(a, b)
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef __WARN_printf
+extern void __kc_warn_slowpath(const char *file, const int line,
+               const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
+#endif /* __WARN_printf */
+
+#ifndef WARN
+#define WARN(condition, format...) ({                                          \
+       int __ret_warn_on = !!(condition);                              \
+       if (unlikely(__ret_warn_on))                                    \
+               __WARN_printf(format);                                  \
+       unlikely(__ret_warn_on);                                        \
+})
+#endif /* WARN */
+#undef HAVE_IXGBE_DEBUG_FS
+#undef HAVE_IGB_DEBUG_FS
+#else /* < 2.6.27 */
+#define HAVE_TX_MQ
+#define HAVE_NETDEV_SELECT_QUEUE
+#ifdef CONFIG_DEBUG_FS
+#define HAVE_IXGBE_DEBUG_FS
+#define HAVE_IGB_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+#define pci_ioremap_bar(pdev, bar)     ioremap(pci_resource_start(pdev, bar), \
+                                               pci_resource_len(pdev, bar))
+#define pci_wake_from_d3 _kc_pci_wake_from_d3
+#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
+extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
+extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
+#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
+#ifndef __skb_queue_head_init
+static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
+{
+       list->prev = list->next = (struct sk_buff *)list;
+       list->qlen = 0;
+}
+#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
+#endif
+
+#define PCI_EXP_DEVCAP2                36      /* Device Capabilities 2 */
+#define PCI_EXP_DEVCTL2                40      /* Device Control 2 */
+
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+#ifndef swap
+#define swap(a, b) \
+       do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+#endif
+#define pci_request_selected_regions_exclusive(pdev, bars, name) \
+               pci_request_selected_regions(pdev, bars, name)
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif /* CONFIG_NR_CPUS */
+#ifndef pcie_aspm_enabled
+#define pcie_aspm_enabled()   (1)
+#endif /* pcie_aspm_enabled */
+
+#define  PCI_EXP_SLTSTA_PDS    0x0040  /* Presence Detect State */
+
+#ifndef pci_clear_master
+extern void _kc_pci_clear_master(struct pci_dev *dev);
+#define pci_clear_master(dev)  _kc_pci_clear_master(dev)
+#endif
+
+#ifndef PCI_EXP_LNKCTL_ASPMC
+#define  PCI_EXP_LNKCTL_ASPMC  0x0003  /* ASPM Control */
+#endif
+#else /* < 2.6.29 */
+#ifndef HAVE_NET_DEVICE_OPS
+#define HAVE_NET_DEVICE_OPS
+#endif
+#ifdef CONFIG_DCB
+#define HAVE_PFC_MODE_ENABLE
+#endif /* CONFIG_DCB */
+#endif /* < 2.6.29 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
+#define NO_PTP_SUPPORT
+#define skb_rx_queue_recorded(a) false
+#define skb_get_rx_queue(a) 0
+#define skb_record_rx_queue(a, b) do {} while (0)
+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
+#ifndef CONFIG_PCI_IOV
+#undef pci_enable_sriov
+#define pci_enable_sriov(a, b) -ENOTSUPP
+#undef pci_disable_sriov
+#define pci_disable_sriov(a) do {} while (0)
+#endif /* CONFIG_PCI_IOV */
+#ifndef pr_cont
+#define pr_cont(fmt, ...) \
+       printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif /* pr_cont */
+static inline void _kc_synchronize_irq(unsigned int a)
+{
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+       synchronize_irq();
+#else /* < 2.5.28 */
+       synchronize_irq(a);
+#endif /* < 2.5.28 */
+}
+#undef synchronize_irq
+#define synchronize_irq(a) _kc_synchronize_irq(a)
+
+#define PCI_EXP_LNKCTL2                48      /* Link Control 2 */
+
+#ifdef nr_cpus_node
+#undef nr_cpus_node
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
+#endif
+
+#else /* < 2.6.30 */
+#define HAVE_ASPM_QUIRKS
+#endif /* < 2.6.30 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
+#define ETH_P_1588 0x88F7
+#define ETH_P_FIP  0x8914
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc_count)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(uclist, dev) \
+       for (uclist = dev->uc_list; uclist; uclist = uclist->next)
+#endif
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#ifndef skb_dst
+#define skb_dst(s) ((s)->dst)
+#endif
+
+#ifndef SUPPORTED_1000baseKX_Full
+#define SUPPORTED_1000baseKX_Full      (1 << 17)
+#endif
+#ifndef SUPPORTED_10000baseKX4_Full
+#define SUPPORTED_10000baseKX4_Full    (1 << 18)
+#endif
+#ifndef SUPPORTED_10000baseKR_Full
+#define SUPPORTED_10000baseKR_Full     (1 << 19)
+#endif
+
+#ifndef ADVERTISED_1000baseKX_Full
+#define ADVERTISED_1000baseKX_Full     (1 << 17)
+#endif
+#ifndef ADVERTISED_10000baseKX4_Full
+#define ADVERTISED_10000baseKX4_Full   (1 << 18)
+#endif
+#ifndef ADVERTISED_10000baseKR_Full
+#define ADVERTISED_10000baseKR_Full    (1 << 19)
+#endif
+
+#else /* < 2.6.31 */
+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
+#define HAVE_NETDEV_STORAGE_ADDRESS
+#endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
+#ifndef HAVE_TRANS_START_IN_QUEUE
+#define HAVE_TRANS_START_IN_QUEUE
+#endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
+#include <linux/mdio.h>
+#endif /* < 2.6.31 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
+#undef netdev_tx_t
+#define netdev_tx_t int
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef NETIF_F_FCOE_MTU
+#define NETIF_F_FCOE_MTU       (1 << 26)
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline int _kc_pm_runtime_get_sync(void)
+{
+       return 1;
+}
+#define pm_runtime_get_sync(dev)       _kc_pm_runtime_get_sync()
+#else /* 2.6.0 => 2.6.32 */
+static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev)
+{
+       return 1;
+}
+#ifndef pm_runtime_get_sync
+#define pm_runtime_get_sync(dev)       _kc_pm_runtime_get_sync(dev)
+#endif
+#endif /* 2.6.0 => 2.6.32 */
+#ifndef pm_runtime_put
+#define pm_runtime_put(dev)            do {} while (0)
+#endif
+#ifndef pm_runtime_put_sync
+#define pm_runtime_put_sync(dev)       do {} while (0)
+#endif
+#ifndef pm_runtime_resume
+#define pm_runtime_resume(dev)         do {} while (0)
+#endif
+#ifndef pm_schedule_suspend
+#define pm_schedule_suspend(dev, t)    do {} while (0)
+#endif
+#ifndef pm_runtime_set_suspended
+#define pm_runtime_set_suspended(dev)  do {} while (0)
+#endif
+#ifndef pm_runtime_disable
+#define pm_runtime_disable(dev)                do {} while (0)
+#endif
+#ifndef pm_runtime_put_noidle
+#define pm_runtime_put_noidle(dev)     do {} while (0)
+#endif
+#ifndef pm_runtime_set_active
+#define pm_runtime_set_active(dev)     do {} while (0)
+#endif
+#ifndef pm_runtime_enable
+#define pm_runtime_enable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_get_noresume
+#define pm_runtime_get_noresume(dev)   do {} while (0)
+#endif
+#else /* < 2.6.32 */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_EXTENDED
+#endif /* RHEL >= 6.2 && RHEL < 7.0 */
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT
+#define HAVE_NDO_SET_FEATURES
+#endif /* RHEL >= 6.6 && RHEL < 7.0 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
+#define HAVE_NETDEV_OPS_FCOE_ENABLE
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_OPS_GETAPP
+#define HAVE_DCBNL_OPS_GETAPP
+#endif
+#endif /* CONFIG_DCB */
+#include <linux/pm_runtime.h>
+/* IOV bad DMA target work arounds require at least this kernel rev support */
+#define HAVE_PCIE_TYPE
+#endif /* < 2.6.32 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#ifndef pci_pcie_cap
+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
+#endif
+#ifndef IPV4_FLOW
+#define IPV4_FLOW 0x10
+#endif /* IPV4_FLOW */
+#ifndef IPV6_FLOW
+#define IPV6_FLOW 0x11
+#endif /* IPV6_FLOW */
+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
+      (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* RHEL6 or SLES11 SP1 */
+#ifndef __percpu
+#define __percpu
+#endif /* __percpu */
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)       dma_addr_t ADDR_NAME
+#undef DEFINE_DMA_UNMAP_LEN
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)         __u32 LEN_NAME
+#undef dma_unmap_addr
+#define dma_unmap_addr(PTR, ADDR_NAME)         ((PTR)->ADDR_NAME)
+#undef dma_unmap_addr_set
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)        (((PTR)->ADDR_NAME) = (VAL))
+#undef dma_unmap_len
+#define dma_unmap_len(PTR, LEN_NAME)           ((PTR)->LEN_NAME)
+#undef dma_unmap_len_set
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL)  (((PTR)->LEN_NAME) = (VAL))
+#endif /* !CONFIG_X86_32 && !CONFIG_NEED_DMA_MAP_STATE */
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && \
+       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \
+         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \
+        ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
+         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))))
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+       return !!pci_pcie_cap(dev);
+}
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && \
+      (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))))
+#define sk_tx_queue_get(_sk) (-1)
+#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0)
+#endif /* !(RHEL >= 6.2) */
+
+#if (RHEL_RELEASE_CODE && \
+     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#define HAVE_ETHTOOL_GET_TS_INFO
+#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5))
+#define HAVE_ETHTOOL_GSRSSH
+#define HAVE_RHEL6_SRIOV_CONFIGURE
+#define HAVE_RXFH_NONCONST
+#endif /* RHEL > 6.5 */
+#endif /* RHEL >= 6.4 && RHEL < 7.0 */
+
+#else /* < 2.6.33 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
+#define HAVE_NETDEV_OPS_FCOE_GETWWN
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#endif /* < 2.6.33 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+extern int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
+
+#ifndef ETH_FLAG_NTUPLE
+#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
+#endif
+
+#ifndef netdev_mc_count
+#define netdev_mc_count(dev) ((dev)->mc_count)
+#endif
+#ifndef netdev_mc_empty
+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(mclist, dev) \
+       for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#endif
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc.count)
+#endif
+#ifndef netdev_uc_empty
+#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(ha, dev) \
+       list_for_each_entry(ha, &dev->uc.list, list)
+#endif
+#ifndef dma_set_coherent_mask
+#define dma_set_coherent_mask(dev,mask) \
+       pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
+#endif
+#ifndef pci_dev_run_wake
+#define pci_dev_run_wake(pdev) (0)
+#endif
+
+/* netdev logging taken from include/linux/netdevice.h */
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+       if (dev->reg_state != NETREG_REGISTERED)
+               return "(unregistered net_device)";
+       return dev->name;
+}
+#define netdev_name(netdev)    _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#undef netdev_printk
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#define netdev_printk(level, netdev, format, args...)          \
+do {                                                           \
+       struct pci_dev *pdev = _kc_netdev_to_pdev(netdev);      \
+       printk(level "%s: " format, pci_name(pdev), ##args);    \
+} while(0)
+#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define netdev_printk(level, netdev, format, args...)          \
+do {                                                           \
+       struct pci_dev *pdev = _kc_netdev_to_pdev(netdev);      \
+       struct device *dev = pci_dev_to_dev(pdev);              \
+       dev_printk(level, dev, "%s: " format,                   \
+                  netdev_name(netdev), ##args);                \
+} while(0)
+#else /* 2.6.21 => 2.6.34 */
+#define netdev_printk(level, netdev, format, args...)          \
+       dev_printk(level, (netdev)->dev.parent,                 \
+                  "%s: " format,                               \
+                  netdev_name(netdev), ##args)
+#endif /* <2.6.0 <2.6.21 <2.6.34 */
+#undef netdev_emerg
+#define netdev_emerg(dev, format, args...)                     \
+       netdev_printk(KERN_EMERG, dev, format, ##args)
+#undef netdev_alert
+#define netdev_alert(dev, format, args...)                     \
+       netdev_printk(KERN_ALERT, dev, format, ##args)
+#undef netdev_crit
+#define netdev_crit(dev, format, args...)                      \
+       netdev_printk(KERN_CRIT, dev, format, ##args)
+#undef netdev_err
+#define netdev_err(dev, format, args...)                       \
+       netdev_printk(KERN_ERR, dev, format, ##args)
+#undef netdev_warn
+#define netdev_warn(dev, format, args...)                      \
+       netdev_printk(KERN_WARNING, dev, format, ##args)
+#undef netdev_notice
+#define netdev_notice(dev, format, args...)                    \
+       netdev_printk(KERN_NOTICE, dev, format, ##args)
+#undef netdev_info
+#define netdev_info(dev, format, args...)                      \
+       netdev_printk(KERN_INFO, dev, format, ##args)
+#undef netdev_dbg
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...)                     \
+       netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+#define netdev_dbg(__dev, format, args...)                     \
+do {                                                           \
+       dynamic_dev_dbg((__dev)->dev.parent, "%s: " format,     \
+                       netdev_name(__dev), ##args);            \
+} while (0)
+#else /* DEBUG */
+#define netdev_dbg(__dev, format, args...)                     \
+({                                                             \
+       if (0)                                                  \
+               netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+       0;                                                      \
+})
+#endif /* DEBUG */
+
+#undef netif_printk
+#define netif_printk(priv, type, level, dev, fmt, args...)     \
+do {                                                           \
+       if (netif_msg_##type(priv))                             \
+               netdev_printk(level, (dev), fmt, ##args);       \
+} while (0)
+
+#undef netif_emerg
+#define netif_emerg(priv, type, dev, fmt, args...)             \
+       netif_level(emerg, priv, type, dev, fmt, ##args)
+#undef netif_alert
+#define netif_alert(priv, type, dev, fmt, args...)             \
+       netif_level(alert, priv, type, dev, fmt, ##args)
+#undef netif_crit
+#define netif_crit(priv, type, dev, fmt, args...)              \
+       netif_level(crit, priv, type, dev, fmt, ##args)
+#undef netif_err
+#define netif_err(priv, type, dev, fmt, args...)               \
+       netif_level(err, priv, type, dev, fmt, ##args)
+#undef netif_warn
+#define netif_warn(priv, type, dev, fmt, args...)              \
+       netif_level(warn, priv, type, dev, fmt, ##args)
+#undef netif_notice
+#define netif_notice(priv, type, dev, fmt, args...)            \
+       netif_level(notice, priv, type, dev, fmt, ##args)
+#undef netif_info
+#define netif_info(priv, type, dev, fmt, args...)              \
+       netif_level(info, priv, type, dev, fmt, ##args)
+#undef netif_dbg
+#define netif_dbg(priv, type, dev, fmt, args...)               \
+       netif_level(dbg, priv, type, dev, fmt, ##args)
+
+#ifdef SET_SYSTEM_SLEEP_PM_OPS
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#endif
+
+#ifndef for_each_set_bit
+#define for_each_set_bit(bit, addr, size) \
+       for ((bit) = find_first_bit((addr), (size)); \
+               (bit) < (size); \
+               (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit */
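+/*
+ * e.g. walking a small mask (bits 0, 3 and 5 of 0x29):
+ *
+ *     unsigned long mask = 0x29;
+ *     int bit;
+ *
+ *     for_each_set_bit(bit, &mask, BITS_PER_LONG)
+ *             printk(KERN_DEBUG "bit %d\n", bit);   (prints 0, 3, 5)
+ */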
+
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
+#define dma_unmap_addr pci_unmap_addr
+#define dma_unmap_addr_set pci_unmap_addr_set
+#define dma_unmap_len pci_unmap_len
+#define dma_unmap_len_set pci_unmap_len_set
+#endif /* DEFINE_DMA_UNMAP_ADDR */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
+#ifdef IGB_HWMON
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define sysfs_attr_init(attr)                          \
+       do {                                            \
+               static struct lock_class_key __key;     \
+               (attr)->key = &__key;                   \
+       } while (0)
+#else
+#define sysfs_attr_init(attr) do {} while (0)
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* IGB_HWMON */
+#endif /* RHEL_RELEASE_CODE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline bool _kc_pm_runtime_suspended(void)
+{
+       return false;
+}
+#define pm_runtime_suspended(dev)      _kc_pm_runtime_suspended()
+#else /* 2.6.0 => 2.6.34 */
+static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev)
+{
+       return false;
+}
+#ifndef pm_runtime_suspended
+#define pm_runtime_suspended(dev)      _kc_pm_runtime_suspended(dev)
+#endif
+#endif /* 2.6.0 => 2.6.34 */
+
+#define PCIE_SPEED_2_5GT 0x14
+#define PCIE_SPEED_5_0GT 0x15
+#define PCIE_SPEED_8_0GT 0x16
+
+#else /* < 2.6.34 */
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#ifndef HAVE_SET_RX_MODE
+#define HAVE_SET_RX_MODE
+#endif
+
+#endif /* < 2.6.34 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+                                  const void __user *from, size_t count);
+#define simple_write_to_buffer _kc_simple_write_to_buffer
+
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define PCI_EXP_LNKSTA_NLW_SHIFT 4
+#endif
+
+#ifndef numa_node_id
+#define numa_node_id() 0
+#endif
+#ifndef numa_mem_id
+#define numa_mem_id numa_node_id
+#endif
+#ifdef HAVE_TX_MQ
+#include <net/sch_generic.h>
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
+#define netif_set_real_num_tx_queues  _kc_netif_set_real_num_tx_queues
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#define netif_set_real_num_tx_queues(_netdev, _count) \
+       do { \
+               (_netdev)->egress_subqueue_count = _count; \
+       } while (0)
+#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#else /* HAVE_TX_MQ */
+#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
+#endif /* HAVE_TX_MQ */
+#ifndef ETH_FLAG_RXHASH
+#define ETH_FLAG_RXHASH (1<<28)
+#endif /* ETH_FLAG_RXHASH */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
+#define HAVE_IRQ_AFFINITY_HINT
+#endif
+#else /* < 2.6.35 */
+#define HAVE_PM_QOS_REQUEST_LIST
+#define HAVE_IRQ_AFFINITY_HINT
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
+extern u32 _kc_ethtool_op_get_flags(struct net_device *);
+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef NET_IP_ALIGN
+#undef NET_IP_ALIGN
+#endif
+#define NET_IP_ALIGN 0
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#ifdef NET_SKB_PAD
+#undef NET_SKB_PAD
+#endif
+
+#if (L1_CACHE_BYTES > 32)
+#define NET_SKB_PAD L1_CACHE_BYTES
+#else
+#define NET_SKB_PAD 32
+#endif
+
+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
+                                                           unsigned int length)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
+       if (skb) {
+#if (NET_IP_ALIGN + NET_SKB_PAD)
+               skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+#endif
+               skb->dev = dev;
+       }
+       return skb;
+}
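+/*
+ * e.g. assuming NET_SKB_PAD = 32 and NET_IP_ALIGN = 2, a 1500-byte request
+ * allocates 1500 + 32 + 2 bytes and reserves 34 up front, so after the
+ * 14-byte Ethernet header the IP header starts 4-byte aligned.
+ */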
+
+#ifdef netdev_alloc_skb_ip_align
+#undef netdev_alloc_skb_ip_align
+#endif
+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
+
+#undef netif_level
+#define netif_level(level, priv, type, dev, fmt, args...)      \
+do {                                                           \
+       if (netif_msg_##type(priv))                             \
+               netdev_##level(dev, fmt, ##args);               \
+} while (0)
+
+#undef usleep_range
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#define u64_stats_update_begin(a) do { } while(0)
+#define u64_stats_update_end(a) do { } while(0)
+#define u64_stats_fetch_begin(a) do { } while(0)
+#define u64_stats_fetch_retry_bh(a) (0)
+#define u64_stats_fetch_begin_bh(a) (0)
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
+#define HAVE_8021P_SUPPORT
+#endif
+
+/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+     !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0)))
+static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb)
+{
+       return;
+}
+#endif
+
+#else /* < 2.6.36 */
+
+#define HAVE_PM_QOS_REQUEST_ACTIVE
+#define HAVE_8021P_SUPPORT
+#define HAVE_NDO_GET_STATS64
+#endif /* < 2.6.36 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#ifndef netif_set_real_num_rx_queues
+static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev,
+                                                   unsigned int __always_unused rxq)
+{
+       return 0;
+}
+#define netif_set_real_num_rx_queues(dev, rxq) \
+       __kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
+#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
+#endif
+#ifndef VLAN_N_VID
+#define VLAN_N_VID     VLAN_GROUP_ARRAY_LEN
+#endif /* VLAN_N_VID */
+#ifndef ETH_FLAG_TXVLAN
+#define ETH_FLAG_TXVLAN (1 << 7)
+#endif /* ETH_FLAG_TXVLAN */
+#ifndef ETH_FLAG_RXVLAN
+#define ETH_FLAG_RXVLAN (1 << 8)
+#endif /* ETH_FLAG_RXVLAN */
+
+static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
+{
+       WARN_ON(skb->ip_summed != CHECKSUM_NONE);
+}
+#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
+
+static inline void *_kc_vzalloc_node(unsigned long size, int node)
+{
+       void *addr = vmalloc_node(size, node);
+       if (addr)
+               memset(addr, 0, size);
+       return addr;
+}
+#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
+
+static inline void *_kc_vzalloc(unsigned long size)
+{
+       void *addr = vmalloc(size);
+       if (addr)
+               memset(addr, 0, size);
+       return addr;
+}
+#define vzalloc(_size) _kc_vzalloc(_size)
+
+#ifndef vlan_get_protocol
+static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
+{
+       if (vlan_tx_tag_present(skb) ||
+           skb->protocol != cpu_to_be16(ETH_P_8021Q))
+               return skb->protocol;
+
+       if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
+               return 0;
+
+       return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
+}
+#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
+#endif
+#ifdef HAVE_HW_TIME_STAMP
+#define SKBTX_HW_TSTAMP (1 << 0)
+#define SKBTX_IN_PROGRESS (1 << 2)
+#define SKB_SHARED_TX_IS_UNION
+#endif
+
+#ifndef device_wakeup_enable
+#define device_wakeup_enable(dev)      device_set_wakeup_enable(dev, true)
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
+#ifndef HAVE_VLAN_RX_REGISTER
+#define HAVE_VLAN_RX_REGISTER
+#endif
+#endif /* > 2.4.18 */
+#endif /* < 2.6.37 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
+#else /* 2.6.22 -> 2.6.37 */
+static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
+{
+	return skb->csum_start - skb_headroom(skb);
+}
+#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
+#endif /* 2.6.22 -> 2.6.37 */
+#ifdef CONFIG_DCB
+#ifndef IEEE_8021QAZ_MAX_TCS
+#define IEEE_8021QAZ_MAX_TCS 8
+#endif
+#ifndef DCB_CAP_DCBX_HOST
+#define DCB_CAP_DCBX_HOST              0x01
+#endif
+#ifndef DCB_CAP_DCBX_LLD_MANAGED
+#define DCB_CAP_DCBX_LLD_MANAGED       0x02
+#endif
+#ifndef DCB_CAP_DCBX_VER_CEE
+#define DCB_CAP_DCBX_VER_CEE           0x04
+#endif
+#ifndef DCB_CAP_DCBX_VER_IEEE
+#define DCB_CAP_DCBX_VER_IEEE          0x08
+#endif
+#ifndef DCB_CAP_DCBX_STATIC
+#define DCB_CAP_DCBX_STATIC            0x10
+#endif
+#endif /* CONFIG_DCB */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))
+#define CONFIG_XPS
+#endif /* RHEL_RELEASE_VERSION(6,2) */
+#endif /* < 2.6.38 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#ifndef TC_BITMASK
+#define TC_BITMASK 15
+#endif
+#ifndef NETIF_F_RXCSUM
+#define NETIF_F_RXCSUM         (1 << 29)
+#endif
+#ifndef skb_queue_reverse_walk_safe
+#define skb_queue_reverse_walk_safe(queue, skb, tmp)                           \
+               for (skb = (queue)->prev, tmp = skb->prev;                      \
+                    skb != (struct sk_buff *)(queue);                          \
+                    skb = tmp, tmp = skb->prev)
+#endif
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef FCOE_MTU
+#define FCOE_MTU       2158
+#endif
+#endif
+#ifdef CONFIG_DCB
+#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
+#endif
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)))
+#define kstrtoul(a, b, c)  ((*(c)) = simple_strtoul((a), NULL, (b)), 0)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16);
+#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q))
+extern u8 _kc_netdev_get_num_tc(struct net_device *dev);
+#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev)
+extern int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc);
+#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc))
+#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0)
+#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0)
+extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up);
+#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up)
+#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0)
+#else /* RHEL6.1 or greater */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif /* HAVE_MQPRIO */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#ifndef IEEE_8021QAZ_TSA_STRICT
+#define IEEE_8021QAZ_TSA_STRICT                0
+#endif
+#ifndef IEEE_8021QAZ_TSA_ETS
+#define IEEE_8021QAZ_TSA_ETS           2
+#endif
+#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
+#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
+#endif
+#endif
+#endif /* CONFIG_DCB */
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+
+#ifndef udp_csum
+#define udp_csum __kc_udp_csum
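+/* Checksum the UDP header, seeded with the skb's accumulated csum, then
+ * fold in the csum of every fragment chained on the frag_list.
+ */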
+static inline __wsum __kc_udp_csum(struct sk_buff *skb)
+{
+       __wsum csum = csum_partial(skb_transport_header(skb),
+                                  sizeof(struct udphdr), skb->csum);
+
+       for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
+               csum = csum_add(csum, skb->csum);
+       }
+       return csum;
+}
+#endif /* udp_csum */
+#else /* < 2.6.39 */
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
+#endif
+#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif
+#ifndef HAVE_SETUP_TC
+#define HAVE_SETUP_TC
+#endif
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#endif
+#endif /* CONFIG_DCB */
+#ifndef HAVE_NDO_SET_FEATURES
+#define HAVE_NDO_SET_FEATURES
+#endif
+#endif /* < 2.6.39 */
+
+/*****************************************************************************/
+/* use < 2.6.40 because of a Fedora 15 kernel update where they
+ * updated the kernel version to 2.6.40.x and they back-ported 3.0 features
+ * like set_phys_id for ethtool.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
+#ifdef ETHTOOL_GRXRINGS
+#ifndef FLOW_EXT
+#define        FLOW_EXT        0x80000000
+union _kc_ethtool_flow_union {
+       struct ethtool_tcpip4_spec              tcp_ip4_spec;
+       struct ethtool_usrip4_spec              usr_ip4_spec;
+       __u8                                    hdata[60];
+};
+struct _kc_ethtool_flow_ext {
+       __be16  vlan_etype;
+       __be16  vlan_tci;
+       __be32  data[2];
+};
+struct _kc_ethtool_rx_flow_spec {
+       __u32           flow_type;
+       union _kc_ethtool_flow_union h_u;
+       struct _kc_ethtool_flow_ext h_ext;
+       union _kc_ethtool_flow_union m_u;
+       struct _kc_ethtool_flow_ext m_ext;
+       __u64           ring_cookie;
+       __u32           location;
+};
+#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
+#endif /* FLOW_EXT */
+#endif
+
+#define pci_disable_link_state_locked pci_disable_link_state
+
+#ifndef PCI_LTR_VALUE_MASK
+#define  PCI_LTR_VALUE_MASK    0x000003ff
+#endif
+#ifndef PCI_LTR_SCALE_MASK
+#define  PCI_LTR_SCALE_MASK    0x00001c00
+#endif
+#ifndef PCI_LTR_SCALE_SHIFT
+#define  PCI_LTR_SCALE_SHIFT   10
+#endif
+
+#else /* < 2.6.40 */
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#endif /* < 2.6.40 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) )
+#define USE_LEGACY_PM_SUPPORT
+#ifndef kfree_rcu
+#define kfree_rcu(_ptr, _rcu_head) do {                                \
+       void __kc_kfree_rcu(struct rcu_head *rcu_head)                  \
+       {                                                               \
+               void *ptr = container_of(rcu_head,                      \
+                                        typeof(*_ptr),                 \
+                                        _rcu_head);                    \
+               kfree(ptr);                                             \
+       }                                                               \
+       call_rcu(&(_ptr)->_rcu_head, __kc_kfree_rcu);                   \
+} while (0)
+#define HAVE_KFREE_RCU_BARRIER
+#endif /* kfree_rcu */
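+
+/* Usage sketch for the kfree_rcu() backport above ("struct foo" and
+ * "foo_ptr" are hypothetical); the second argument names the rcu_head
+ * member inside the struct:
+ *
+ *	struct foo {
+ *		int data;
+ *		struct rcu_head rcu;
+ *	};
+ *
+ *	kfree_rcu(foo_ptr, rcu);	frees foo_ptr after a grace period
+ */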
+#ifndef kstrtol_from_user
+#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r)
+static inline int _kc_kstrtol_from_user(const char __user *s, size_t count,
+                                       unsigned int base, long *res)
+{
+       /* sign, base 2 representation, newline, terminator */
+       char buf[1 + sizeof(long) * 8 + 1 + 1];
+
+       count = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, s, count))
+               return -EFAULT;
+       buf[count] = '\0';
+       return strict_strtol(buf, base, res);
+}
+#endif
+
+/* 20000base_blah_full Supported and Advertised Registers */
+#define SUPPORTED_20000baseMLD2_Full   (1 << 21)
+#define SUPPORTED_20000baseKR2_Full    (1 << 22)
+#define ADVERTISED_20000baseMLD2_Full  (1 << 21)
+#define ADVERTISED_20000baseKR2_Full   (1 << 22)
+#endif /* < 3.0.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#ifndef __netdev_alloc_skb_ip_align
+#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
+#endif /* __netdev_alloc_skb_ip_align */
+#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
+#define dcb_ieee_delapp(dev, app) 0
+#define dcb_ieee_getapp_mask(dev, app) (1 << (app)->priority)
+
+/* 1000BASE-T Control register */
+#define CTL1000_AS_MASTER      0x0800
+#define CTL1000_ENABLE_MASTER  0x1000
+
+/* kernels less than 3.0.0 don't have this */
+#ifndef ETH_P_8021AD
+#define ETH_P_8021AD   0x88A8
+#endif
+#else /* < 3.1.0 */
+#ifndef HAVE_DCBNL_IEEE_DELAPP
+#define HAVE_DCBNL_IEEE_DELAPP
+#endif
+#endif /* < 3.1.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#ifndef dma_zalloc_coherent
+#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f)
+static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size,
+                                           dma_addr_t *dma_handle, gfp_t flag)
+{
+       void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
+       if (ret)
+               memset(ret, 0, size);
+       return ret;
+}
+#endif
+#ifdef ETHTOOL_GRXRINGS
+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+#endif /* ETHTOOL_GRXRINGS */
+
+#ifndef skb_frag_size
+#define skb_frag_size(frag)    _kc_skb_frag_size(frag)
+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
+{
+       return frag->size;
+}
+#endif /* skb_frag_size */
+
+#ifndef skb_frag_size_sub
+#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+       frag->size -= delta;
+}
+#endif /* skb_frag_size_sub */
+
+#ifndef skb_frag_page
+#define skb_frag_page(frag)    _kc_skb_frag_page(frag)
+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
+{
+       return frag->page;
+}
+#endif /* skb_frag_page */
+
+#ifndef skb_frag_address
+#define skb_frag_address(frag) _kc_skb_frag_address(frag)
+static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
+{
+       return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+#endif /* skb_frag_address */
+
+#ifndef skb_frag_dma_map
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#include <linux/dma-mapping.h>
+#endif
+#define skb_frag_dma_map(dev,frag,offset,size,dir) \
+               _kc_skb_frag_dma_map(dev,frag,offset,size,dir)
+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
+                                             const skb_frag_t *frag,
+                                             size_t offset, size_t size,
+                                             enum dma_data_direction dir)
+{
+       return dma_map_page(dev, skb_frag_page(frag),
+                           frag->page_offset + offset, size, dir);
+}
+#endif /* skb_frag_dma_map */
+
+#ifndef __skb_frag_unref
+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
+static inline void __kc_skb_frag_unref(skb_frag_t *frag)
+{
+       put_page(skb_frag_page(frag));
+}
+#endif /* __skb_frag_unref */
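+
+/* Usage sketch for the skb_frag_* accessors above (hypothetical Tx
+ * mapping loop, "dev" being the DMA device; not part of this patch):
+ *
+ *	const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ *	dma_addr_t dma = skb_frag_dma_map(dev, frag, 0,
+ *					  skb_frag_size(frag),
+ *					  DMA_TO_DEVICE);
+ */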
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN  -1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN 0xff
+#endif
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\
+     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)))
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#endif
+#endif
+#else /* < 3.2.0 */
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_VF_SPOOFCHK_CONFIGURE
+#endif
+#ifndef HAVE_SKB_L4_RXHASH
+#define HAVE_SKB_L4_RXHASH
+#endif
+#define HAVE_IOMMU_PRESENT
+#define HAVE_PM_QOS_REQUEST_LIST_NEW
+#endif /* < 3.2.0 */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
+#undef ixgbe_get_netdev_tc_txq
+#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
+#endif
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
+typedef u32 netdev_features_t;
+#endif
+#undef PCI_EXP_TYPE_RC_EC
+#define  PCI_EXP_TYPE_RC_EC    0xa     /* Root Complex Event Collector */
+#ifndef CONFIG_BQL
+#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0)
+#define netdev_completed_queue(_n, _p, _b) do {} while (0)
+#define netdev_tx_sent_queue(_q, _b) do {} while (0)
+#define netdev_sent_queue(_n, _b) do {} while (0)
+#define netdev_tx_reset_queue(_q) do {} while (0)
+#define netdev_reset_queue(_n) do {} while (0)
+#endif
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#endif /* SLE_VERSION(11,3,0) */
+#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q)
+#else /* ! < 3.3.0 */
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef ETHTOOL_SRXNTUPLE
+#undef ETHTOOL_SRXNTUPLE
+#endif
+#endif /* < 3.3.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+#ifndef NETIF_F_RXFCS
+#define NETIF_F_RXFCS  0
+#endif /* NETIF_F_RXFCS */
+#ifndef NETIF_F_RXALL
+#define NETIF_F_RXALL  0
+#endif /* NETIF_F_RXALL */
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define NUMTCS_RETURNS_U8
+
+int _kc_simple_open(struct inode *inode, struct file *file);
+#define simple_open _kc_simple_open
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
+
+#ifndef skb_add_rx_frag
+#define skb_add_rx_frag _kc_skb_add_rx_frag
+extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *,
+                               int, int, unsigned int);
+#endif
+#ifdef NET_ADDR_RANDOM
+#define eth_hw_addr_random(N) do { \
+       eth_random_addr(N->dev_addr); \
+       N->addr_assign_type |= NET_ADDR_RANDOM; \
+       } while (0)
+#else /* NET_ADDR_RANDOM */
+#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr)
+#endif /* NET_ADDR_RANDOM */
+#else /* >= 3.4.0 */
+#include <linux/kconfig.h>
+#endif /* >= 3.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \
+    ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) )
+#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+#define HAVE_PTP_1588_CLOCK
+#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* >= 3.0.0 || RHEL_RELEASE >= 6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+
+#ifndef ether_addr_equal
+static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+       return !compare_ether_addr(addr1, addr2);
+}
+#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
+#endif
+
+#else
+#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
+#endif /* < 3.5.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) )
+#define PCI_EXP_LNKCAP2                44      /* Link Capability 2 */
+
+#ifndef MDIO_EEE_100TX
+#define MDIO_EEE_100TX         0x0002  /* 100TX EEE cap */
+#endif
+#ifndef MDIO_EEE_1000T
+#define MDIO_EEE_1000T         0x0004  /* 1000T EEE cap */
+#endif
+#ifndef MDIO_EEE_10GT
+#define MDIO_EEE_10GT          0x0008  /* 10GT EEE cap */
+#endif
+#ifndef MDIO_EEE_1000KX
+#define MDIO_EEE_1000KX                0x0010  /* 1000KX EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKX4
+#define MDIO_EEE_10GKX4                0x0020  /* 10G KX4 EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKR
+#define MDIO_EEE_10GKR         0x0040  /* 10G KR EEE cap */
+#endif
+
+#ifndef __GFP_MEMALLOC
+#define __GFP_MEMALLOC 0
+#endif
+
+#ifndef eth_random_addr
+#define eth_random_addr _kc_eth_random_addr
+static inline void _kc_eth_random_addr(u8 *addr)
+{
+	get_random_bytes(addr, ETH_ALEN);
+	addr[0] &= 0xfe; /* clear multicast */
+	addr[0] |= 0x02; /* set local assignment */
+}
+#endif /* eth_random_addr */
+#else /* < 3.6.0 */
+#define HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* < 3.6.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#ifndef ADVERTISED_40000baseKR4_Full
+/* these defines were all added in one commit, so it is safe to key
+ * activation of the whole set off a single define
+ */
+#define SUPPORTED_40000baseKR4_Full    (1 << 23)
+#define SUPPORTED_40000baseCR4_Full    (1 << 24)
+#define SUPPORTED_40000baseSR4_Full    (1 << 25)
+#define SUPPORTED_40000baseLR4_Full    (1 << 26)
+#define ADVERTISED_40000baseKR4_Full   (1 << 23)
+#define ADVERTISED_40000baseCR4_Full   (1 << 24)
+#define ADVERTISED_40000baseSR4_Full   (1 << 25)
+#define ADVERTISED_40000baseLR4_Full   (1 << 26)
+#endif
+
+#ifndef mmd_eee_cap_to_ethtool_sup_t
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+       u32 supported = 0;
+
+       if (eee_cap & MDIO_EEE_100TX)
+               supported |= SUPPORTED_100baseT_Full;
+       if (eee_cap & MDIO_EEE_1000T)
+               supported |= SUPPORTED_1000baseT_Full;
+       if (eee_cap & MDIO_EEE_10GT)
+               supported |= SUPPORTED_10000baseT_Full;
+       if (eee_cap & MDIO_EEE_1000KX)
+               supported |= SUPPORTED_1000baseKX_Full;
+       if (eee_cap & MDIO_EEE_10GKX4)
+               supported |= SUPPORTED_10000baseKX4_Full;
+       if (eee_cap & MDIO_EEE_10GKR)
+               supported |= SUPPORTED_10000baseKR_Full;
+
+       return supported;
+}
+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
+       __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
+#endif /* mmd_eee_cap_to_ethtool_sup_t */
+
+#ifndef mmd_eee_adv_to_ethtool_adv_t
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+       u32 adv = 0;
+
+       if (eee_adv & MDIO_EEE_100TX)
+               adv |= ADVERTISED_100baseT_Full;
+       if (eee_adv & MDIO_EEE_1000T)
+               adv |= ADVERTISED_1000baseT_Full;
+       if (eee_adv & MDIO_EEE_10GT)
+               adv |= ADVERTISED_10000baseT_Full;
+       if (eee_adv & MDIO_EEE_1000KX)
+               adv |= ADVERTISED_1000baseKX_Full;
+       if (eee_adv & MDIO_EEE_10GKX4)
+               adv |= ADVERTISED_10000baseKX4_Full;
+       if (eee_adv & MDIO_EEE_10GKR)
+               adv |= ADVERTISED_10000baseKR_Full;
+
+       return adv;
+}
+
+#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \
+       __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
+#endif /* mmd_eee_adv_to_ethtool_adv_t */
+
+#ifndef ethtool_adv_to_mmd_eee_adv_t
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+       u16 reg = 0;
+
+       if (adv & ADVERTISED_100baseT_Full)
+               reg |= MDIO_EEE_100TX;
+       if (adv & ADVERTISED_1000baseT_Full)
+               reg |= MDIO_EEE_1000T;
+       if (adv & ADVERTISED_10000baseT_Full)
+               reg |= MDIO_EEE_10GT;
+       if (adv & ADVERTISED_1000baseKX_Full)
+               reg |= MDIO_EEE_1000KX;
+       if (adv & ADVERTISED_10000baseKX4_Full)
+               reg |= MDIO_EEE_10GKX4;
+       if (adv & ADVERTISED_10000baseKR_Full)
+               reg |= MDIO_EEE_10GKR;
+
+       return reg;
+}
+#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv)
+#endif /* ethtool_adv_to_mmd_eee_adv_t */
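+
+/* Usage sketch for the EEE helpers above ("edata" is a hypothetical
+ * struct ethtool_eee pointer and the *_reg values hypothetical MMD
+ * register reads; not part of this patch):
+ *
+ *	edata->supported = mmd_eee_cap_to_ethtool_sup_t(eee_cap_reg);
+ *	edata->advertised = mmd_eee_adv_to_ethtool_adv_t(eee_adv_reg);
+ *	eee_adv_reg = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ */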
+
+#ifndef pci_pcie_type
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+static inline u8 pci_pcie_type(struct pci_dev *pdev)
+{
+       int pos;
+       u16 reg16;
+
+       pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+       if (!pos)
+               BUG();
+       pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+       return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+#else /* < 2.6.24 */
+#define pci_pcie_type(x)       (x)->pcie_type
+#endif /* < 2.6.24 */
+#endif /* pci_pcie_type */
+
+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \
+    ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) )
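+/* Kernels in this range (without the RHEL/SLES backports) have the
+ * single-argument ptp_clock_register(); drop any extra arguments such
+ * as the parent device.
+ */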
+#define ptp_clock_register(caps, args...) ptp_clock_register(caps)
+#endif
+
+#ifndef pcie_capability_read_word
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
+#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v)
+#endif /* pcie_capability_read_word */
+
+#ifndef pcie_capability_write_word
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
+#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v)
+#endif /* pcie_capability_write_word */
+
+#ifndef pcie_capability_clear_and_set_word
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+                                           u16 clear, u16 set);
+#define pcie_capability_clear_and_set_word(d,p,c,s) \
+       __kc_pcie_capability_clear_and_set_word(d,p,c,s)
+#endif /* pcie_capability_clear_and_set_word */
+
+#ifndef pcie_capability_clear_word
+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
+                                            u16 clear);
+#define pcie_capability_clear_word(d, p, c) \
+       __kc_pcie_capability_clear_word(d, p, c)
+#endif /* pcie_capability_clear_word */
+
+#ifndef PCI_EXP_LNKSTA2
+#define PCI_EXP_LNKSTA2                50      /* Link Status 2 */
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define USE_CONST_DEV_UC_CHAR
+#endif
+
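+/* Before 3.7, napi_gro_flush() takes no flush_old argument; drop it. */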
+#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi)
+
+#else /* >= 3.7.0 */
+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+#define USE_CONST_DEV_UC_CHAR
+#endif /* >= 3.7.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+#ifndef pci_sriov_set_totalvfs
+static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs)
+{
+       return 0;
+}
+#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b))
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
+#define  PCI_EXP_LNKCTL_ASPM_L0S  0x01 /* L0s Enable */
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L1
+#define  PCI_EXP_LNKCTL_ASPM_L1   0x02 /* L1 Enable */
+#endif
+#define HAVE_CONFIG_HOTPLUG
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
+       0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+#ifndef is_link_local_ether_addr
+static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
+{
+       __be16 *a = (__be16 *)addr;
+       static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+       static const __be16 m = cpu_to_be16(0xfff0);
+
+       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
+#endif /* is_link_local_ether_addr */
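+
+/* Usage sketch (hypothetical Rx filter, not part of this patch): the
+ * helper matches the 01:80:c2:00:00:0X block, e.g. STP/LLDP addresses:
+ *
+ *	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+ *		handle_link_local_frame(skb);
+ */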
+#else /* >= 3.8.0 */
+#ifndef __devinit
+#define __devinit
+#endif
+
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+
+#ifndef __devinitconst
+#define __devinitconst
+#endif
+
+#ifndef __devexit
+#define __devexit
+#endif
+
+#ifndef __devexit_p
+#define __devexit_p
+#endif
+
+#ifndef HAVE_ENCAP_CSUM_OFFLOAD
+#define HAVE_ENCAP_CSUM_OFFLOAD
+#endif
+
+#define HAVE_GRE_ENCAP_OFFLOAD
+
+#ifndef HAVE_SRIOV_CONFIGURE
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#define HAVE_BRIDGE_ATTRIBS
+#ifndef BRIDGE_MODE_VEB
+#define BRIDGE_MODE_VEB                0       /* Default loopback mode */
+#endif /* BRIDGE_MODE_VEB */
+#ifndef BRIDGE_MODE_VEPA
+#define BRIDGE_MODE_VEPA       1       /* 802.1Qbg defined VEPA mode */
+#endif /* BRIDGE_MODE_VEPA */
+#endif /* >= 3.8.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
+
+#undef hlist_entry
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#undef hlist_entry_safe
+#define hlist_entry_safe(ptr, type, member) \
+	({ typeof(ptr) ____ptr = (ptr); \
+	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+	})
+
+#undef hlist_for_each_entry
+#define hlist_for_each_entry(pos, head, member)                             \
+       for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
+            pos;                                                           \
+            pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#undef hlist_for_each_entry_safe
+#define hlist_for_each_entry_safe(pos, n, head, member)                    \
+       for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);   \
+            pos && ({ n = pos->member.next; 1; });                         \
+            pos = hlist_entry_safe(n, typeof(*pos), member))
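+
+/* The 3.9-style iterators above take the entry itself as the cursor
+ * instead of a separate hlist_node. Usage sketch (hypothetical hash
+ * bucket walk, not part of this patch):
+ *
+ *	struct foo *pos;
+ *	hlist_for_each_entry(pos, &hash_table[i], node)
+ *		process(pos);
+ */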
+
+#ifdef CONFIG_XPS
+extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16);
+#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx))
+#else /* CONFIG_XPS */
+#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0)
+#endif /* CONFIG_XPS */
+
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#define _kc_hashrnd 0xd631614b /* not so random hash salt */
+extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+#define __netdev_pick_tx __kc_netdev_pick_tx
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#else
+#define HAVE_BRIDGE_FILTER
+#define HAVE_FDB_DEL_NLATTR
+#endif /* < 3.9.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifndef NAPI_POLL_WEIGHT
+#define NAPI_POLL_WEIGHT 64
+#endif
+#ifdef CONFIG_PCI_IOV
+extern int __kc_pci_vfs_assigned(struct pci_dev *dev);
+#else
+static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev)
+{
+       return 0;
+}
+#endif
+#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
+
+#ifndef list_first_entry_or_null
+#define list_first_entry_or_null(ptr, type, member) \
+       (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#endif
+
+#ifndef VLAN_TX_COOKIE_MAGIC
+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
+                                                        u16 vlan_tci)
+{
+#ifdef VLAN_TAG_PRESENT
+       vlan_tci |= VLAN_TAG_PRESENT;
+#endif
+       skb->vlan_tci = vlan_tci;
+	return skb;
+}
+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
+       __kc__vlan_hwaccel_put_tag(skb, vlan_tci)
+#endif
+
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                                struct net_device *dev,
+                                const unsigned char *addr, u16 flags);
+#ifdef HAVE_FDB_DEL_NLATTR
+extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+                                struct net_device *dev,
+                                const unsigned char *addr);
+#else
+extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+                                const unsigned char *addr);
+#endif
+#else
+extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+                                unsigned char *addr, u16 flags);
+extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+                                unsigned char *addr);
+#endif
+#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add
+#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del
+#endif /* HAVE_FDB_OPS */
+
+#ifndef PCI_DEVID
+#define PCI_DEVID(bus, devfn)  ((((u16)(bus)) << 8) | (devfn))
+#endif
+#else /* >= 3.10.0 */
+#define HAVE_ENCAP_TSO_OFFLOAD
+#define USE_DEFAULT_FDB_DEL_DUMP
+#define HAVE_SKB_INNER_NETWORK_HEADER
+#endif /* >= 3.10.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) )
+#else /* >= 3.11.0 */
+#define HAVE_NDO_SET_VF_LINK_STATE
+#define HAVE_SKB_INNER_PROTOCOL
+#endif /* >= 3.11.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+extern int __kc_pcie_get_minimum_link(struct pci_dev *dev,
+                                     enum pci_bus_speed *speed,
+                                     enum pcie_link_width *width);
+#ifndef pcie_get_minimum_link
+#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w)
+#endif
+#else /* >= 3.12.0 */
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#define HAVE_VXLAN_RX_OFFLOAD
+#define HAVE_NDO_GET_PHYS_PORT_ID
+#endif /* >= 3.12.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
+#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m)
+extern int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask);
+#ifndef u64_stats_init
+#define u64_stats_init(a) do { } while(0)
+#endif
+#ifndef BIT_ULL
+#define BIT_ULL(n) (1ULL << (n))
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#endif
+
+#else /* >= 3.13.0 */
+#define HAVE_VXLAN_CHECKS
+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#else
+#define HAVE_NDO_SELECT_QUEUE_ACCEL
+#endif
+#define HAVE_NET_GET_RANDOM_ONCE
+#endif /* 3.13.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
+
+#ifndef U32_MAX
+#define U32_MAX ((u32)~0U)
+#endif
+
+#define dev_consume_skb_any(x) dev_kfree_skb_any(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \
+     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
+
+/* skb_set_hash is an inline upstream, so it is only ever a #define
+ * if we created it ourselves below
+ */
+#ifndef skb_set_hash
+
+#define PKT_HASH_TYPE_NONE     0
+#define PKT_HASH_TYPE_L2       1
+#define PKT_HASH_TYPE_L3       2
+#define PKT_HASH_TYPE_L4       3
+
+#define skb_set_hash __kc_skb_set_hash
+static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
+                                    u32 __maybe_unused hash,
+                                    int __maybe_unused type)
+{
+#ifdef HAVE_SKB_L4_RXHASH
+       skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
+#endif
+#ifdef NETIF_F_RXHASH
+       skb->rxhash = hash;
+#endif
+}
+#endif /* !skb_set_hash */
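+
+/* Usage sketch (hypothetical Rx descriptor field, not part of this
+ * patch): record the hardware RSS hash on the skb:
+ *
+ *	skb_set_hash(skb, le32_to_cpu(rx_desc->rss_hash), PKT_HASH_TYPE_L4);
+ */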
+
+#else
+
+#ifndef HAVE_ENCAP_TSO_OFFLOAD
+#define HAVE_ENCAP_TSO_OFFLOAD
+#endif /* HAVE_ENCAP_TSO_OFFLOAD */
+
+#ifndef HAVE_VXLAN_RX_OFFLOAD
+#define HAVE_VXLAN_RX_OFFLOAD
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
+#ifndef HAVE_VXLAN_CHECKS
+#define HAVE_VXLAN_CHECKS
+#endif /* HAVE_VXLAN_CHECKS */
+#endif /* !(RHEL >= 7.0) && !(SLES >= 12.0) */
+
+#ifndef pci_enable_msix_range
+extern int __kc_pci_enable_msix_range(struct pci_dev *dev,
+                                     struct msix_entry *entries,
+                                     int minvec, int maxvec);
+#define pci_enable_msix_range __kc_pci_enable_msix_range
+#endif
+
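+/* ether_addr_copy() (added in 3.14) copies one 6-byte MAC address; on
+ * platforms with cheap unaligned access the backport uses a 32-bit plus
+ * a 16-bit store, otherwise three 16-bit stores.
+ */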
+#ifndef ether_addr_copy
+#define ether_addr_copy __kc_ether_addr_copy
+static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+       *(u32 *)dst = *(const u32 *)src;
+       *(u16 *)(dst + 4) = *(const u16 *)(src + 4);
+#else
+       u16 *a = (u16 *)dst;
+       const u16 *b = (const u16 *)src;
+
+       a[0] = b[0];
+       a[1] = b[1];
+       a[2] = b[2];
+#endif
+}
+#endif /* ether_addr_copy */
+
+#else /* >= 3.14.0 */
+
+/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */
+#ifndef HAVE_NDO_DFWD_OPS
+#define HAVE_NDO_DFWD_OPS
+#endif
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif /* 3.14.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) )
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \
+     !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30)))
+#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh
+#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh
+#endif
+
+#else
+#define HAVE_PTP_1588_CLOCK_PINS
+#define HAVE_NETDEV_PORT
+#endif /* 3.15.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic()  smp_mb()
+#endif
+#ifndef __dev_uc_sync
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
+               struct net_device *dev,
+               int (*sync)(struct net_device *, const unsigned char *),
+               int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+               struct net_device *dev,
+               int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
+               struct net_device *dev,
+               int (*sync)(struct net_device *, const unsigned char *),
+               int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+               struct net_device *dev,
+               int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#endif /* HAVE_SET_RX_MODE */
+
+static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev,
+                                  int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+                                  int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+       return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+       return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count,
+                                     dev, sync, unsync);
+#else
+       return 0;
+#endif
+}
+#define __dev_uc_sync __kc_dev_uc_sync
+
+static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev,
+                                     int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+       __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+       __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_UNICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_uc_unsync __kc_dev_uc_unsync
+
+static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev,
+                                  int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+                                  int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+       return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+       return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count,
+                                     dev, sync, unsync);
+#else
+       return 0;
+#endif
+}
+#define __dev_mc_sync __kc_dev_mc_sync
+
+static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev,
+                                     int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+       __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+       __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_mc_unsync __kc_dev_mc_unsync
+#endif /* __dev_uc_sync */
+
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif
+
+#else
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif /* 3.16.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
+#ifndef timespec64
+#define timespec64 timespec
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+       return ts;
+}
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+       return ts64;
+}
+#define timespec64_equal timespec_equal
+#define timespec64_compare timespec_compare
+#define set_normalized_timespec64 set_normalized_timespec
+#define timespec64_add_safe timespec_add_safe
+#define timespec64_add timespec_add
+#define timespec64_sub timespec_sub
+#define timespec64_valid timespec_valid
+#define timespec64_valid_strict timespec_valid_strict
+#define timespec64_to_ns timespec_to_ns
+#define ns_to_timespec64 ns_to_timespec
+#define ktime_to_timespec64 ktime_to_timespec
+#define timespec64_add_ns timespec_add_ns
+#endif /* timespec64 */
+#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
+#else
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#endif /* 3.17.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
+#ifndef NO_PTP_SUPPORT
+#include <linux/errqueue.h>
+extern struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb);
+extern void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+                               struct skb_shared_hwtstamps *hwtstamps);
+#define skb_clone_sk __kc_skb_clone_sk
+#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp
+#endif
+extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len);
+#define eth_get_headlen __kc_eth_get_headlen
+#ifndef ETH_P_XDSA
+#define ETH_P_XDSA 0x00F8
+#endif
+#else /*  3.18.0 */
+#define HAVE_SKBUFF_CSUM_LEVEL
+#define HAVE_SKB_XMIT_MORE
+#define HAVE_SKB_INNER_PROTOCOL_TYPE
+#endif /* 3.18.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) )
+#else
+#define HAVE_NDO_FEATURES_CHECK
+#endif /* 3.18.4 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
+/* netdev_phys_port_id renamed to netdev_phys_item_id */
+#define netdev_phys_item_id netdev_phys_port_id
+
+#ifndef NETDEV_RSS_KEY_LEN
+#define NETDEV_RSS_KEY_LEN (13 * 4)
+#endif
+#if ( !(RHEL_RELEASE_CODE && \
+       (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && \
+       (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) )
+#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len)
+#endif /* RHEL_RELEASE_CODE */
+extern void __kc_netdev_rss_key_fill(void *buffer, size_t len);
+#define SPEED_20000 20000
+#define SPEED_40000 40000
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+#ifndef dev_alloc_pages
+#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order))
+#endif
+#ifndef dev_alloc_page
+#define dev_alloc_page() dev_alloc_pages(0)
+#endif
+#if !defined(eth_skb_pad) && !defined(skb_put_padto)
+/**
+ *     __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size
+ *     @skb: buffer to pad
+ *     @len: minimal length
+ *
+ *     Pads up a buffer to ensure the trailing bytes exist and are
+ *     blanked. If the buffer already contains sufficient data it
+ *     is untouched. Otherwise it is extended. Returns zero on
+ *     success. The skb is freed on error.
+ */
+static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+       unsigned int size = skb->len;
+
+       if (unlikely(size < len)) {
+               len -= size;
+               if (skb_pad(skb, len))
+                       return -ENOMEM;
+               __skb_put(skb, len);
+       }
+       return 0;
+}
+#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len)
+
+static inline int __kc_eth_skb_pad(struct sk_buff *skb)
+{
+       return __kc_skb_put_padto(skb, ETH_ZLEN);
+}
+#define eth_skb_pad(skb) __kc_eth_skb_pad(skb)
+#endif /* eth_skb_pad && skb_put_padto */
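+
+/* Usage sketch (hypothetical Tx path, not part of this patch): pad runt
+ * frames to the 60-byte Ethernet minimum before handing them to hardware:
+ *
+ *	if (eth_skb_pad(skb))
+ *		return NETDEV_TX_OK;	(skb was already freed on failure)
+ */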
+
+#ifndef napi_alloc_skb
+static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length)
+{
+       return netdev_alloc_skb_ip_align(napi->dev, length);
+}
+#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len)
+#endif /* napi_alloc_skb */
+#define HAVE_CONFIG_PM_RUNTIME
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define NDO_BRIDGE_GETLINK_HAS_FILTER_MASK_PARAM
+#define HAVE_RXFH_HASHFUNC
+#endif /* RHEL_RELEASE_CODE */
+#else /* 3.19.0 */
+#define HAVE_NDO_FDB_ADD_VID
+#define HAVE_RXFH_HASHFUNC
+#endif /* 3.19.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) )
+/* vlan_tx_xx functions got renamed to skb_vlan */
+#ifndef skb_vlan_tag_get
+#define skb_vlan_tag_get vlan_tx_tag_get
+#endif
+#ifndef skb_vlan_tag_present
+#define skb_vlan_tag_present vlan_tx_tag_present
+#endif
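+
+/* Usage sketch (hypothetical Tx path with a made-up tx_flags encoding,
+ * not part of this patch):
+ *
+ *	if (skb_vlan_tag_present(skb))
+ *		vlan_tci = skb_vlan_tag_get(skb);
+ */
+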
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#endif
+#else
+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+#endif /* 3.20.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) )
+#ifndef NO_PTP_SUPPORT
+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#include <linux/timecounter.h>
+#else
+#include <linux/clocksource.h>
+#endif
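+/* The upstream timecounter_adjtime() shifts a timecounter by a signed
+ * nanosecond delta; offsetting the accumulated nsec field is sufficient.
+ */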
+static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+       tc->nsec += delta;
+}
+#define timecounter_adjtime __kc_timecounter_adjtime
+#endif
+#else
+#define HAVE_PTP_CLOCK_INFO_GETTIME64
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#endif /* 4,1,0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9))
+#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)))
+static inline bool page_is_pfmemalloc(struct page __maybe_unused *page)
+{
+#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC
+       return page->pfmemalloc;
+#else
+       return false;
+#endif
+}
+#endif /* !SLES12sp1 */
+#else
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* 4.1.9 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0))
+#else
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#endif /* 4.2.0 */
+
+#endif /* _KCOMPAT_H_ */